--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:49 +0000
+Subject: bpf: do not allow root to mangle valid pointers
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-11-fllinden@amazon.com>
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit 82abbf8d2fc46d79611ab58daa7c608df14bb3ee upstream.
+
+Do not allow root to convert valid pointers into unknown scalars.
+In particular disallow:
+ ptr &= reg
+ ptr <<= reg
+ ptr += ptr
+and explicitly allow:
+ ptr -= ptr
+since pkt_end - pkt == length
+
+1.
+This minimizes amount of address leaks root can do.
+In the future may need to further tighten the leaks with kptr_restrict.
+
+2.
+If program has such pointer math it's likely a user mistake and
+when verifier complains about it right away instead of many instructions
+later on invalid memory access it's easier for users to fix their progs.
+
+3.
+when register holding a pointer cannot change to scalar it allows JITs to
+optimize better. Like 32-bit archs could use single register for pointers
+instead of a pair required to hold 64-bit scalars.
+
+4.
+reduces architecture dependent behavior. Since code:
+r1 = r10;
+r1 &= 0xff;
+if (r1 ...)
+will behave differently arm64 vs x64 and offloaded vs native.
+
+A significant chunk of ptr mangling was allowed by
+commit f1174f77b50c ("bpf/verifier: rework value tracking")
+yet some of it was allowed even earlier.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+[fllinden@amazon.com: backport to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 100 +++++++++-------------------
+ tools/testing/selftests/bpf/test_verifier.c | 56 ++++++++-------
+ 2 files changed, 62 insertions(+), 94 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2292,28 +2292,24 @@ static int adjust_ptr_min_max_vals(struc
+
+ if (BPF_CLASS(insn->code) != BPF_ALU64) {
+ /* 32-bit ALU ops on pointers produce (meaningless) scalars */
+- if (!env->allow_ptr_leaks)
+- verbose("R%d 32-bit pointer arithmetic prohibited\n",
+- dst);
++ verbose("R%d 32-bit pointer arithmetic prohibited\n",
++ dst);
+ return -EACCES;
+ }
+
+ if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
+- if (!env->allow_ptr_leaks)
+- verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+- dst);
++ verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
++ dst);
+ return -EACCES;
+ }
+ if (ptr_reg->type == CONST_PTR_TO_MAP) {
+- if (!env->allow_ptr_leaks)
+- verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+- dst);
++ verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
++ dst);
+ return -EACCES;
+ }
+ if (ptr_reg->type == PTR_TO_PACKET_END) {
+- if (!env->allow_ptr_leaks)
+- verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+- dst);
++ verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
++ dst);
+ return -EACCES;
+ }
+
+@@ -2388,9 +2384,8 @@ static int adjust_ptr_min_max_vals(struc
+ case BPF_SUB:
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+- if (!env->allow_ptr_leaks)
+- verbose("R%d tried to subtract pointer from scalar\n",
+- dst);
++ verbose("R%d tried to subtract pointer from scalar\n",
++ dst);
+ return -EACCES;
+ }
+ /* We don't allow subtraction from FP, because (according to
+@@ -2398,9 +2393,8 @@ static int adjust_ptr_min_max_vals(struc
+ * be able to deal with it.
+ */
+ if (ptr_reg->type == PTR_TO_STACK) {
+- if (!env->allow_ptr_leaks)
+- verbose("R%d subtraction from stack pointer prohibited\n",
+- dst);
++ verbose("R%d subtraction from stack pointer prohibited\n",
++ dst);
+ return -EACCES;
+ }
+ if (known && (ptr_reg->off - smin_val ==
+@@ -2450,19 +2444,14 @@ static int adjust_ptr_min_max_vals(struc
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+- /* bitwise ops on pointers are troublesome, prohibit for now.
+- * (However, in principle we could allow some cases, e.g.
+- * ptr &= ~3 which would reduce min_value by 3.)
+- */
+- if (!env->allow_ptr_leaks)
+- verbose("R%d bitwise operator %s on pointer prohibited\n",
+- dst, bpf_alu_string[opcode >> 4]);
++ /* bitwise ops on pointers are troublesome. */
++ verbose("R%d bitwise operator %s on pointer prohibited\n",
++ dst, bpf_alu_string[opcode >> 4]);
+ return -EACCES;
+ default:
+ /* other operators (e.g. MUL,LSH) produce non-pointer results */
+- if (!env->allow_ptr_leaks)
+- verbose("R%d pointer arithmetic with %s operator prohibited\n",
+- dst, bpf_alu_string[opcode >> 4]);
++ verbose("R%d pointer arithmetic with %s operator prohibited\n",
++ dst, bpf_alu_string[opcode >> 4]);
+ return -EACCES;
+ }
+
+@@ -2752,7 +2741,6 @@ static int adjust_reg_min_max_vals(struc
+ struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
+ struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
+ u8 opcode = BPF_OP(insn->code);
+- int rc;
+
+ dst_reg = &regs[insn->dst_reg];
+ src_reg = NULL;
+@@ -2763,43 +2751,29 @@ static int adjust_reg_min_max_vals(struc
+ if (src_reg->type != SCALAR_VALUE) {
+ if (dst_reg->type != SCALAR_VALUE) {
+ /* Combining two pointers by any ALU op yields
+- * an arbitrary scalar.
++ * an arbitrary scalar. Disallow all math except
++ * pointer subtraction
+ */
+- if (!env->allow_ptr_leaks) {
+- verbose("R%d pointer %s pointer prohibited\n",
+- insn->dst_reg,
+- bpf_alu_string[opcode >> 4]);
+- return -EACCES;
++ if (opcode == BPF_SUB){
++ mark_reg_unknown(regs, insn->dst_reg);
++ return 0;
+ }
+- mark_reg_unknown(regs, insn->dst_reg);
+- return 0;
++ verbose("R%d pointer %s pointer prohibited\n",
++ insn->dst_reg,
++ bpf_alu_string[opcode >> 4]);
++ return -EACCES;
+ } else {
+ /* scalar += pointer
+ * This is legal, but we have to reverse our
+ * src/dest handling in computing the range
+ */
+- rc = adjust_ptr_min_max_vals(env, insn,
+- src_reg, dst_reg);
+- if (rc == -EACCES && env->allow_ptr_leaks) {
+- /* scalar += unknown scalar */
+- __mark_reg_unknown(&off_reg);
+- return adjust_scalar_min_max_vals(
+- env, insn,
+- dst_reg, off_reg);
+- }
+- return rc;
++ return adjust_ptr_min_max_vals(env, insn,
++ src_reg, dst_reg);
+ }
+ } else if (ptr_reg) {
+ /* pointer += scalar */
+- rc = adjust_ptr_min_max_vals(env, insn,
+- dst_reg, src_reg);
+- if (rc == -EACCES && env->allow_ptr_leaks) {
+- /* unknown scalar += scalar */
+- __mark_reg_unknown(dst_reg);
+- return adjust_scalar_min_max_vals(
+- env, insn, dst_reg, *src_reg);
+- }
+- return rc;
++ return adjust_ptr_min_max_vals(env, insn,
++ dst_reg, src_reg);
+ }
+ } else {
+ /* Pretend the src is a reg with a known value, since we only
+@@ -2808,17 +2782,9 @@ static int adjust_reg_min_max_vals(struc
+ off_reg.type = SCALAR_VALUE;
+ __mark_reg_known(&off_reg, insn->imm);
+ src_reg = &off_reg;
+- if (ptr_reg) { /* pointer += K */
+- rc = adjust_ptr_min_max_vals(env, insn,
+- ptr_reg, src_reg);
+- if (rc == -EACCES && env->allow_ptr_leaks) {
+- /* unknown scalar += K */
+- __mark_reg_unknown(dst_reg);
+- return adjust_scalar_min_max_vals(
+- env, insn, dst_reg, off_reg);
+- }
+- return rc;
+- }
++ if (ptr_reg) /* pointer += K */
++ return adjust_ptr_min_max_vals(env, insn,
++ ptr_reg, src_reg);
+ }
+
+ /* Got here implies adding two SCALAR_VALUEs */
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -462,9 +462,7 @@ static struct bpf_test tests[] = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R1 subtraction from stack pointer",
+- .result_unpriv = REJECT,
+- .errstr = "R1 invalid mem access",
++ .errstr = "R1 subtraction from stack pointer",
+ .result = REJECT,
+ },
+ {
+@@ -1900,9 +1898,8 @@ static struct bpf_test tests[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+- .result = ACCEPT,
+- .result_unpriv = REJECT,
+- .errstr_unpriv = "R1 pointer += pointer",
++ .result = REJECT,
++ .errstr = "R1 pointer += pointer",
+ },
+ {
+ "unpriv: neg pointer",
+@@ -2694,7 +2691,8 @@ static struct bpf_test tests[] = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct __sk_buff, len)),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+@@ -3001,7 +2999,7 @@ static struct bpf_test tests[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "invalid access to packet",
++ .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+@@ -3988,9 +3986,7 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3, 11 },
+- .errstr_unpriv = "R0 pointer += pointer",
+- .errstr = "R0 invalid mem access 'inv'",
+- .result_unpriv = REJECT,
++ .errstr = "R0 pointer += pointer",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+@@ -4031,7 +4027,7 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+- .errstr = "R4 invalid mem access",
++ .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+ },
+@@ -4052,7 +4048,7 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+- .errstr = "R4 invalid mem access",
++ .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+ },
+@@ -4073,7 +4069,7 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 4 },
+- .errstr = "R4 invalid mem access",
++ .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS
+ },
+@@ -5304,10 +5300,8 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+- .errstr_unpriv = "R0 bitwise operator &= on pointer",
+- .errstr = "invalid mem access 'inv'",
++ .errstr = "R0 bitwise operator &= on pointer",
+ .result = REJECT,
+- .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 2",
+@@ -5323,10 +5317,8 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+- .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
+- .errstr = "invalid mem access 'inv'",
++ .errstr = "R0 32-bit pointer arithmetic prohibited",
+ .result = REJECT,
+- .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 3",
+@@ -5342,10 +5334,8 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map2 = { 3 },
+- .errstr_unpriv = "R0 pointer arithmetic with /= operator",
+- .errstr = "invalid mem access 'inv'",
++ .errstr = "R0 pointer arithmetic with /= operator",
+ .result = REJECT,
+- .result_unpriv = REJECT,
+ },
+ {
+ "map element value illegal alu op, 4",
+@@ -5938,8 +5928,7 @@ static struct bpf_test tests[] = {
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_in_map = { 3 },
+- .errstr = "R1 type=inv expected=map_ptr",
+- .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
++ .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+ .result = REJECT,
+ },
+ {
+@@ -7300,6 +7289,19 @@ static struct bpf_test tests[] = {
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
++ "pkt_end - pkt_start is allowed",
++ .insns = {
++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
++ offsetof(struct __sk_buff, data_end)),
++ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
++ offsetof(struct __sk_buff, data)),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
++ BPF_EXIT_INSN(),
++ },
++ .result = ACCEPT,
++ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
++ },
++ {
+ "XDP pkt read, pkt_end mangling, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+@@ -7314,7 +7316,7 @@ static struct bpf_test tests[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "R1 offset is outside of the packet",
++ .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+@@ -7333,7 +7335,7 @@ static struct bpf_test tests[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+- .errstr = "R1 offset is outside of the packet",
++ .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
--- /dev/null
+From foo@baz Tue Jun 8 04:46:03 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:42 +0000
+Subject: bpf: Ensure off_reg has no mixed signed bounds for all types
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-4-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 24c109bb1537c12c02aeed2d51a347b4d6a9b76e upstream.
+
+The mixed signed bounds check really belongs into retrieve_ptr_limit()
+instead of outside of it in adjust_ptr_min_max_vals(). The reason is
+that this check is not tied to PTR_TO_MAP_VALUE only, but to all pointer
+types that we handle in retrieve_ptr_limit() and given errors from the latter
+propagate back to adjust_ptr_min_max_vals() and lead to rejection of the
+program, it's a better place to reside to avoid anything slipping through
+for future types. The reason why we must reject such off_reg is that we
+otherwise would not be able to derive a mask, see details in 9d7eceede769
+("bpf: restrict unknown scalars of mixed signed bounds for unprivileged").
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: backport to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2025,12 +2025,18 @@ static struct bpf_insn_aux_data *cur_aux
+ }
+
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+- u32 *ptr_limit, u8 opcode, bool off_is_neg)
++ const struct bpf_reg_state *off_reg,
++ u32 *ptr_limit, u8 opcode)
+ {
++ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+ u32 off, max;
+
++ if (!tnum_is_const(off_reg->var_off) &&
++ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
++ return -EACCES;
++
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+@@ -2121,7 +2127,7 @@ static int sanitize_ptr_alu(struct bpf_v
+ alu_state |= ptr_is_dst_reg ?
+ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+
+- err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
++ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (err < 0)
+ return err;
+
+@@ -2164,8 +2170,8 @@ static int adjust_ptr_min_max_vals(struc
+ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+- u32 dst = insn->dst_reg, src = insn->src_reg;
+ u8 opcode = BPF_OP(insn->code);
++ u32 dst = insn->dst_reg;
+ int ret;
+
+ dst_reg = &regs[dst];
+@@ -2205,13 +2211,6 @@ static int adjust_ptr_min_max_vals(struc
+ dst);
+ return -EACCES;
+ }
+- if (ptr_reg->type == PTR_TO_MAP_VALUE) {
+- if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
+- verbose("R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
+- off_reg == dst_reg ? dst : src);
+- return -EACCES;
+- }
+- }
+
+ /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
+ * The id may be overwritten later if we create a new variable offset.
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:53 +0000
+Subject: bpf: Fix leakage of uninitialized bpf stack under speculation
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-15-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 801c6058d14a82179a7ee17a4b532cac6fad067f upstream.
+
+The current implemented mechanisms to mitigate data disclosure under
+speculation mainly address stack and map value oob access from the
+speculative domain. However, Piotr discovered that uninitialized BPF
+stack is not protected yet, and thus old data from the kernel stack,
+potentially including addresses of kernel structures, could still be
+extracted from that 512 bytes large window. The BPF stack is special
+compared to map values since it's not zero initialized for every
+program invocation, whereas map values /are/ zero initialized upon
+their initial allocation and thus cannot leak any prior data in either
+domain. In the non-speculative domain, the verifier ensures that every
+stack slot read must have a prior stack slot write by the BPF program
+to avoid such data leaking issue.
+
+However, this is not enough: for example, when the pointer arithmetic
+operation moves the stack pointer from the last valid stack offset to
+the first valid offset, the sanitation logic allows for any intermediate
+offsets during speculative execution, which could then be used to
+extract any restricted stack content via side-channel.
+
+Given for unprivileged stack pointer arithmetic the use of unknown
+but bounded scalars is generally forbidden, we can simply turn the
+register-based arithmetic operation into an immediate-based arithmetic
+operation without the need for masking. This also gives the benefit
+of reducing the needed instructions for the operation. Given after
+the work in 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic
+mask"), the aux->alu_limit already holds the final immediate value for
+the offset register with the known scalar. Thus, a simple mov of the
+immediate to AX register with using AX as the source for the original
+instruction is sufficient and possible now in this case.
+
+Reported-by: Piotr Krysiuk <piotras@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Piotr Krysiuk <piotras@gmail.com>
+Reviewed-by: Piotr Krysiuk <piotras@gmail.com>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: fixed minor 4.14 conflict because of renamed function]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf_verifier.h | 5 +++--
+ kernel/bpf/verifier.c | 27 +++++++++++++++++----------
+ 2 files changed, 20 insertions(+), 12 deletions(-)
+
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -114,10 +114,11 @@ struct bpf_verifier_state_list {
+ };
+
+ /* Possible states for alu_state member. */
+-#define BPF_ALU_SANITIZE_SRC 1U
+-#define BPF_ALU_SANITIZE_DST 2U
++#define BPF_ALU_SANITIZE_SRC (1U << 0)
++#define BPF_ALU_SANITIZE_DST (1U << 1)
+ #define BPF_ALU_NEG_VALUE (1U << 2)
+ #define BPF_ALU_NON_POINTER (1U << 3)
++#define BPF_ALU_IMMEDIATE (1U << 4)
+ #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
+ BPF_ALU_SANITIZE_DST)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2120,6 +2120,7 @@ static int sanitize_ptr_alu(struct bpf_v
+ {
+ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_verifier_state *vstate = env->cur_state;
++ bool off_is_imm = tnum_is_const(off_reg->var_off);
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ u8 opcode = BPF_OP(insn->code);
+@@ -2150,6 +2151,7 @@ static int sanitize_ptr_alu(struct bpf_v
+ alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+ } else {
+ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++ alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+ alu_state |= ptr_is_dst_reg ?
+ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+ }
+@@ -4850,7 +4852,7 @@ static int fixup_bpf_calls(struct bpf_ve
+ const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
+ struct bpf_insn insn_buf[16];
+ struct bpf_insn *patch = &insn_buf[0];
+- bool issrc, isneg;
++ bool issrc, isneg, isimm;
+ u32 off_reg;
+
+ aux = &env->insn_aux_data[i + delta];
+@@ -4861,16 +4863,21 @@ static int fixup_bpf_calls(struct bpf_ve
+ isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
+ issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
+ BPF_ALU_SANITIZE_SRC;
++ isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
+
+ off_reg = issrc ? insn->src_reg : insn->dst_reg;
+- if (isneg)
+- *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+- *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+- *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+- *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+- *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+- *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+- *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
++ if (isimm) {
++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
++ } else {
++ if (isneg)
++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
++ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
++ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
++ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
++ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
++ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
++ }
+ if (!issrc)
+ *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
+ insn->src_reg = BPF_REG_AX;
+@@ -4878,7 +4885,7 @@ static int fixup_bpf_calls(struct bpf_ve
+ insn->code = insn->code == code_add ?
+ code_sub : code_add;
+ *patch++ = *insn;
+- if (issrc && isneg)
++ if (issrc && isneg && !isimm)
+ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+ cnt = patch - insn_buf;
+
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:55 +0000
+Subject: bpf: Fix mask direction swap upon off reg sign change
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-17-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit bb01a1bba579b4b1c5566af24d95f1767859771e upstream.
+
+Masking direction as indicated via mask_to_left is considered to be
+calculated once and then used to derive pointer limits. Thus, this
+needs to be placed into bpf_sanitize_info instead so we can pass it
+to sanitize_ptr_alu() call after the pointer move. Piotr noticed a
+corner case where the off reg causes masking direction change which
+then results in an incorrect final aux->alu_limit.
+
+Fixes: 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic mask")
+Reported-by: Piotr Krysiuk <piotras@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Piotr Krysiuk <piotras@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2033,18 +2033,10 @@ enum {
+ };
+
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+- const struct bpf_reg_state *off_reg,
+- u32 *alu_limit, u8 opcode)
++ u32 *alu_limit, bool mask_to_left)
+ {
+- bool off_is_neg = off_reg->smin_value < 0;
+- bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+- (opcode == BPF_SUB && !off_is_neg);
+ u32 max = 0, ptr_limit = 0;
+
+- if (!tnum_is_const(off_reg->var_off) &&
+- (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+- return REASON_BOUNDS;
+-
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+@@ -2112,6 +2104,7 @@ static bool sanitize_needed(u8 opcode)
+
+ struct bpf_sanitize_info {
+ struct bpf_insn_aux_data aux;
++ bool mask_to_left;
+ };
+
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+@@ -2143,7 +2136,16 @@ static int sanitize_ptr_alu(struct bpf_v
+ if (vstate->speculative)
+ goto do_sim;
+
+- err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
++ if (!commit_window) {
++ if (!tnum_is_const(off_reg->var_off) &&
++ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
++ return REASON_BOUNDS;
++
++ info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
++ (opcode == BPF_SUB && !off_is_neg);
++ }
++
++ err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
+ if (err < 0)
+ return err;
+
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:44 +0000
+Subject: bpf: Improve verifier error messages for users
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-6-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit a6aaece00a57fa6f22575364b3903dfbccf5345d upstream.
+
+Consolidate all error handling and provide more user-friendly error messages
+from sanitize_ptr_alu() and sanitize_val_alu().
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: backport to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 84 ++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 62 insertions(+), 22 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2024,6 +2024,14 @@ static struct bpf_insn_aux_data *cur_aux
+ return &env->insn_aux_data[env->insn_idx];
+ }
+
++enum {
++ REASON_BOUNDS = -1,
++ REASON_TYPE = -2,
++ REASON_PATHS = -3,
++ REASON_LIMIT = -4,
++ REASON_STACK = -5,
++};
++
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+ u32 *alu_limit, u8 opcode)
+@@ -2035,7 +2043,7 @@ static int retrieve_ptr_limit(const stru
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+- return -EACCES;
++ return REASON_BOUNDS;
+
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+@@ -2059,11 +2067,11 @@ static int retrieve_ptr_limit(const stru
+ }
+ break;
+ default:
+- return -EINVAL;
++ return REASON_TYPE;
+ }
+
+ if (ptr_limit >= max)
+- return -ERANGE;
++ return REASON_LIMIT;
+ *alu_limit = ptr_limit;
+ return 0;
+ }
+@@ -2083,7 +2091,7 @@ static int update_alu_sanitation_state(s
+ if (aux->alu_state &&
+ (aux->alu_state != alu_state ||
+ aux->alu_limit != alu_limit))
+- return -EACCES;
++ return REASON_PATHS;
+
+ /* Corresponding fixup done in fixup_bpf_calls(). */
+ aux->alu_state = alu_state;
+@@ -2156,7 +2164,46 @@ do_sim:
+ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
+ if (!ptr_is_dst_reg && ret)
+ *dst_reg = tmp;
+- return !ret ? -EFAULT : 0;
++ return !ret ? REASON_STACK : 0;
++}
++
++static int sanitize_err(struct bpf_verifier_env *env,
++ const struct bpf_insn *insn, int reason,
++ const struct bpf_reg_state *off_reg,
++ const struct bpf_reg_state *dst_reg)
++{
++ static const char *err = "pointer arithmetic with it prohibited for !root";
++ const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
++ u32 dst = insn->dst_reg, src = insn->src_reg;
++
++ switch (reason) {
++ case REASON_BOUNDS:
++ verbose("R%d has unknown scalar with mixed signed bounds, %s\n",
++ off_reg == dst_reg ? dst : src, err);
++ break;
++ case REASON_TYPE:
++ verbose("R%d has pointer with unsupported alu operation, %s\n",
++ off_reg == dst_reg ? src : dst, err);
++ break;
++ case REASON_PATHS:
++ verbose("R%d tried to %s from different maps, paths or scalars, %s\n",
++ dst, op, err);
++ break;
++ case REASON_LIMIT:
++ verbose("R%d tried to %s beyond pointer bounds, %s\n",
++ dst, op, err);
++ break;
++ case REASON_STACK:
++ verbose("R%d could not be pushed for speculative verification, %s\n",
++ dst, err);
++ break;
++ default:
++ verbose("verifier internal error: unknown reason (%d)\n",
++ reason);
++ break;
++ }
++
++ return -EACCES;
+ }
+
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+@@ -2230,10 +2277,9 @@ static int adjust_ptr_min_max_vals(struc
+ switch (opcode) {
+ case BPF_ADD:
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0) {
+- verbose("R%d tried to add from different maps, paths, or prohibited types\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+ /* We can take a fixed offset as long as it doesn't overflow
+ * the s32 'off' field
+ */
+@@ -2285,10 +2331,9 @@ static int adjust_ptr_min_max_vals(struc
+ break;
+ case BPF_SUB:
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0) {
+- verbose("R%d tried to sub from different maps, paths, or prohibited types\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+ if (!env->allow_ptr_leaks)
+@@ -2412,7 +2457,6 @@ static int adjust_scalar_min_max_vals(st
+ s64 smin_val, smax_val;
+ u64 umin_val, umax_val;
+ u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+- u32 dst = insn->dst_reg;
+ int ret;
+
+ if (insn_bitness == 32) {
+@@ -2449,10 +2493,8 @@ static int adjust_scalar_min_max_vals(st
+ switch (opcode) {
+ case BPF_ADD:
+ ret = sanitize_val_alu(env, insn);
+- if (ret < 0) {
+- verbose("R%d tried to add from different pointers or scalars\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, NULL, NULL);
+ if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
+ signed_add_overflows(dst_reg->smax_value, smax_val)) {
+ dst_reg->smin_value = S64_MIN;
+@@ -2473,10 +2515,8 @@ static int adjust_scalar_min_max_vals(st
+ break;
+ case BPF_SUB:
+ ret = sanitize_val_alu(env, insn);
+- if (ret < 0) {
+- verbose("R%d tried to sub from different pointers or scalars\n", dst);
+- return ret;
+- }
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, NULL, NULL);
+ if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
+ signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+ /* Overflow possible, we know nothing */
--- /dev/null
+From foo@baz Tue Jun 8 04:46:03 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:41 +0000
+Subject: bpf: Move off_reg into sanitize_ptr_alu
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-3-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 6f55b2f2a1178856c19bbce2f71449926e731914 upstream.
+
+Small refactor to drag off_reg into sanitize_ptr_alu(), so we later on can
+use off_reg for generalizing some of the checks for all pointer types.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: fix minor contextual conflict for 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2094,11 +2094,12 @@ static int sanitize_val_alu(struct bpf_v
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+- struct bpf_reg_state *dst_reg,
+- bool off_is_neg)
++ const struct bpf_reg_state *off_reg,
++ struct bpf_reg_state *dst_reg)
+ {
+ struct bpf_verifier_state *vstate = env->cur_state;
+ struct bpf_insn_aux_data *aux = cur_aux(env);
++ bool off_is_neg = off_reg->smin_value < 0;
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ u8 opcode = BPF_OP(insn->code);
+ u32 alu_state, alu_limit;
+@@ -2224,7 +2225,7 @@ static int adjust_ptr_min_max_vals(struc
+
+ switch (opcode) {
+ case BPF_ADD:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+ if (ret < 0) {
+ verbose("R%d tried to add from different maps, paths, or prohibited types\n", dst);
+ return ret;
+@@ -2279,7 +2280,7 @@ static int adjust_ptr_min_max_vals(struc
+ }
+ break;
+ case BPF_SUB:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+ if (ret < 0) {
+ verbose("R%d tried to sub from different maps, paths, or prohibited types\n", dst);
+ return ret;
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:46 +0000
+Subject: bpf: Move sanitize_val_alu out of op switch
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-8-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit f528819334881fd622fdadeddb3f7edaed8b7c9b upstream.
+
+Add a small sanitize_needed() helper function and move sanitize_val_alu()
+out of the main opcode switch. In upcoming work, we'll move sanitize_ptr_alu()
+as well out of its opcode switch so this helps to streamline both.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: backported to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2110,6 +2110,11 @@ static int sanitize_val_alu(struct bpf_v
+ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
+ }
+
++static bool sanitize_needed(u8 opcode)
++{
++ return opcode == BPF_ADD || opcode == BPF_SUB;
++}
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+@@ -2510,11 +2515,14 @@ static int adjust_scalar_min_max_vals(st
+ return 0;
+ }
+
+- switch (opcode) {
+- case BPF_ADD:
++ if (sanitize_needed(opcode)) {
+ ret = sanitize_val_alu(env, insn);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, NULL, NULL);
++ }
++
++ switch (opcode) {
++ case BPF_ADD:
+ if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
+ signed_add_overflows(dst_reg->smax_value, smax_val)) {
+ dst_reg->smin_value = S64_MIN;
+@@ -2534,9 +2542,6 @@ static int adjust_scalar_min_max_vals(st
+ dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
+ break;
+ case BPF_SUB:
+- ret = sanitize_val_alu(env, insn);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, NULL, NULL);
+ if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
+ signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+ /* Overflow possible, we know nothing */
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:56 +0000
+Subject: bpf: No need to simulate speculative domain for immediates
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-18-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit a7036191277f9fa68d92f2071ddc38c09b1e5ee5 upstream.
+
+In 801c6058d14a ("bpf: Fix leakage of uninitialized bpf stack under
+speculation") we replaced masking logic with direct loads of immediates
+if the register is a known constant. Given in this case we do not apply
+any masking, there is also no reason for the operation to be truncated
+under the speculative domain.
+
+Therefore, there is also zero reason for the verifier to branch-off and
+simulate this case, it only needs to do it for unknown but bounded scalars.
+As a side-effect, this also enables few test cases that were previously
+rejected due to simulation under zero truncation.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Piotr Krysiuk <piotras@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2169,8 +2169,12 @@ do_sim:
+ /* If we're in commit phase, we're done here given we already
+ * pushed the truncated dst_reg into the speculative verification
+ * stack.
++ *
++ * Also, when register is a known constant, we rewrite register-based
++ * operation to immediate-based, and thus do not need masking (and as
++ * a consequence, do not need to simulate the zero-truncation either).
+ */
+- if (commit_window)
++ if (commit_window || off_is_imm)
+ return 0;
+
+ /* Simulate and find potential out-of-bounds access under
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:45 +0000
+Subject: bpf: Refactor and streamline bounds check into helper
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-7-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 073815b756c51ba9d8384d924c5d1c03ca3d1ae4 upstream.
+
+Move the bounds check in adjust_ptr_min_max_vals() into a small helper named
+sanitize_check_bounds() in order to simplify the former a bit.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: backport to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 54 ++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 37 insertions(+), 17 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2206,6 +2206,41 @@ static int sanitize_err(struct bpf_verif
+ return -EACCES;
+ }
+
++static int sanitize_check_bounds(struct bpf_verifier_env *env,
++ const struct bpf_insn *insn,
++ const struct bpf_reg_state *dst_reg)
++{
++ u32 dst = insn->dst_reg;
++
++ /* For unprivileged we require that resulting offset must be in bounds
++ * in order to be able to sanitize access later on.
++ */
++ if (env->allow_ptr_leaks)
++ return 0;
++
++ switch (dst_reg->type) {
++ case PTR_TO_STACK:
++ if (check_stack_access(env, dst_reg, dst_reg->off +
++ dst_reg->var_off.value, 1)) {
++ verbose("R%d stack pointer arithmetic goes out of range, "
++ "prohibited for !root\n", dst);
++ return -EACCES;
++ }
++ break;
++ case PTR_TO_MAP_VALUE:
++ if (check_map_access(env, dst, dst_reg->off, 1)) {
++ verbose("R%d pointer arithmetic of map value goes out of range, "
++ "prohibited for !root\n", dst);
++ return -EACCES;
++ }
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++
+ /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
+ * Caller should also handle BPF_MOV case separately.
+ * If we return -EACCES, caller may want to try again treating pointer as a
+@@ -2421,23 +2456,8 @@ static int adjust_ptr_min_max_vals(struc
+ __reg_deduce_bounds(dst_reg);
+ __reg_bound_offset(dst_reg);
+
+- /* For unprivileged we require that resulting offset must be in bounds
+- * in order to be able to sanitize access later on.
+- */
+- if (!env->allow_ptr_leaks) {
+- if (dst_reg->type == PTR_TO_MAP_VALUE &&
+- check_map_access(env, dst, dst_reg->off, 1)) {
+- verbose("R%d pointer arithmetic of map value goes out of range, "
+- "prohibited for !root\n", dst);
+- return -EACCES;
+- } else if (dst_reg->type == PTR_TO_STACK &&
+- check_stack_access(env, dst_reg, dst_reg->off +
+- dst_reg->var_off.value, 1)) {
+- verbose("R%d stack pointer arithmetic goes out of range, "
+- "prohibited for !root\n", dst);
+- return -EACCES;
+- }
+- }
++ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
++ return -EACCES;
+
+ return 0;
+ }
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:43 +0000
+Subject: bpf: Rework ptr_limit into alu_limit and add common error path
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-5-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit b658bbb844e28f1862867f37e8ca11a8e2aa94a3 upstream.
+
+Small refactor with no semantic changes in order to consolidate the max
+ptr_limit boundary check.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2026,12 +2026,12 @@ static struct bpf_insn_aux_data *cur_aux
+
+ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+- u32 *ptr_limit, u8 opcode)
++ u32 *alu_limit, u8 opcode)
+ {
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+- u32 off, max;
++ u32 off, max = 0, ptr_limit = 0;
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -2045,22 +2045,27 @@ static int retrieve_ptr_limit(const stru
+ max = MAX_BPF_STACK + mask_to_left;
+ off = ptr_reg->off + ptr_reg->var_off.value;
+ if (mask_to_left)
+- *ptr_limit = MAX_BPF_STACK + off;
++ ptr_limit = MAX_BPF_STACK + off;
+ else
+- *ptr_limit = -off - 1;
+- return *ptr_limit >= max ? -ERANGE : 0;
++ ptr_limit = -off - 1;
++ break;
+ case PTR_TO_MAP_VALUE:
+ max = ptr_reg->map_ptr->value_size;
+ if (mask_to_left) {
+- *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
++ ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+ } else {
+ off = ptr_reg->smin_value + ptr_reg->off;
+- *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
++ ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+ }
+- return *ptr_limit >= max ? -ERANGE : 0;
++ break;
+ default:
+ return -EINVAL;
+ }
++
++ if (ptr_limit >= max)
++ return -ERANGE;
++ *alu_limit = ptr_limit;
++ return 0;
+ }
+
+ static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
--- /dev/null
+From foo@baz Tue Jun 8 04:46:03 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:40 +0000
+Subject: bpf, selftests: Fix up some test_verifier cases for unprivileged
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-2-fllinden@amazon.com>
+
+From: Piotr Krysiuk <piotras@gmail.com>
+
+commit 0a13e3537ea67452d549a6a80da3776d6b7dedb3 upstream.
+
+Fix up test_verifier error messages for the case where the original error
+message changed, or for the case where pointer alu errors differ between
+privileged and unprivileged tests. Also, add alternative tests for keeping
+coverage of the original verifier rejection error message (fp alu), and
+newly reject map_ptr += rX where rX == 0 given we now forbid alu on these
+types for unprivileged. All test_verifier cases pass after the change. The
+test case fixups were kept separate to ease backporting of core changes.
+
+Signed-off-by: Piotr Krysiuk <piotras@gmail.com>
+Co-developed-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com: backport to 4.14, skipping non-existent tests]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_verifier.c | 44 +++++++++++++++++++++-------
+ 1 file changed, 34 insertions(+), 10 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -2235,7 +2235,7 @@ static struct bpf_test tests[] = {
+ .result = ACCEPT,
+ },
+ {
+- "unpriv: adding of fp",
++ "unpriv: adding of fp, reg",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+@@ -2243,9 +2243,22 @@ static struct bpf_test tests[] = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+- .result = ACCEPT,
++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+ .result_unpriv = REJECT,
++ .result = ACCEPT,
++ },
++ {
++ "unpriv: adding of fp, imm",
++ .insns = {
++ BPF_MOV64_IMM(BPF_REG_0, 0),
++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
++ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
++ BPF_EXIT_INSN(),
++ },
+ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
++ .result_unpriv = REJECT,
++ .result = ACCEPT,
+ },
+ {
+ "unpriv: cmp of stack pointer",
+@@ -7766,8 +7779,9 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ .errstr = "R0 tried to subtract pointer from scalar",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 2",
+@@ -7780,6 +7794,8 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+@@ -7790,8 +7806,9 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ .errstr = "R0 tried to subtract pointer from scalar",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 4",
+@@ -7804,6 +7821,8 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+@@ -7814,8 +7833,9 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ .errstr = "R0 tried to subtract pointer from scalar",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 6",
+@@ -7826,8 +7846,9 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ .errstr = "R0 tried to subtract pointer from scalar",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 7",
+@@ -7839,8 +7860,9 @@ static struct bpf_test tests[] = {
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+ .errstr = "dereference of modified ctx ptr",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 8",
+@@ -7852,8 +7874,9 @@ static struct bpf_test tests[] = {
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+ .errstr = "dereference of modified ctx ptr",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 9",
+@@ -7863,8 +7886,9 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+ .errstr = "R0 tried to subtract pointer from scalar",
++ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 10",
+@@ -7876,8 +7900,8 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .result = REJECT,
+ .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
++ .result = REJECT,
+ },
+ {
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:47 +0000
+Subject: bpf: Tighten speculative pointer arithmetic mask
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-9-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 7fedb63a8307dda0ec3b8969a3b233a1dd7ea8e0 upstream.
+
+This work tightens the offset mask we use for unprivileged pointer arithmetic
+in order to mitigate a corner case reported by Piotr and Benedict where in
+the speculative domain it is possible to advance, for example, the map value
+pointer by up to value_size-1 out-of-bounds in order to leak kernel memory
+via side-channel to user space.
+
+Before this change, the computed ptr_limit for retrieve_ptr_limit() helper
+represents largest valid distance when moving pointer to the right or left
+which is then fed as aux->alu_limit to generate masking instructions against
+the offset register. After the change, the derived aux->alu_limit represents
+the largest potential value of the offset register which we mask against which
+is just a narrower subset of the former limit.
+
+For minimal complexity, we call sanitize_ptr_alu() from 2 observation points
+in adjust_ptr_min_max_vals(), that is, before and after the simulated alu
+operation. In the first step, we retrieve the alu_state and alu_limit before
+the operation as well as we branch-off a verifier path and push it to the
+verification stack as we did before which checks the dst_reg under truncation,
+in other words, when the speculative domain would attempt to move the pointer
+out-of-bounds.
+
+In the second step, we retrieve the new alu_limit and calculate the absolute
+distance between both. Moreover, we commit the alu_state and final alu_limit
+via update_alu_sanitation_state() to the env's instruction aux data, and bail
+out from there if there is a mismatch due to coming from different verification
+paths with different states.
+
+Reported-by: Piotr Krysiuk <piotras@gmail.com>
+Reported-by: Benedict Schlueter <benedict.schlueter@rub.de>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Tested-by: Benedict Schlueter <benedict.schlueter@rub.de>
+[fllinden@amazon.com: backported to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 70 +++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 44 insertions(+), 26 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2039,7 +2039,7 @@ static int retrieve_ptr_limit(const stru
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
+ (opcode == BPF_SUB && !off_is_neg);
+- u32 off, max = 0, ptr_limit = 0;
++ u32 max = 0, ptr_limit = 0;
+
+ if (!tnum_is_const(off_reg->var_off) &&
+ (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+@@ -2048,23 +2048,18 @@ static int retrieve_ptr_limit(const stru
+ switch (ptr_reg->type) {
+ case PTR_TO_STACK:
+ /* Offset 0 is out-of-bounds, but acceptable start for the
+- * left direction, see BPF_REG_FP.
++ * left direction, see BPF_REG_FP. Also, unknown scalar
++ * offset where we would need to deal with min/max bounds is
++ * currently prohibited for unprivileged.
+ */
+ max = MAX_BPF_STACK + mask_to_left;
+- off = ptr_reg->off + ptr_reg->var_off.value;
+- if (mask_to_left)
+- ptr_limit = MAX_BPF_STACK + off;
+- else
+- ptr_limit = -off - 1;
++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+ break;
+ case PTR_TO_MAP_VALUE:
+ max = ptr_reg->map_ptr->value_size;
+- if (mask_to_left) {
+- ptr_limit = ptr_reg->umax_value + ptr_reg->off;
+- } else {
+- off = ptr_reg->smin_value + ptr_reg->off;
+- ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
+- }
++ ptr_limit = (mask_to_left ?
++ ptr_reg->smin_value :
++ ptr_reg->umax_value) + ptr_reg->off;
+ break;
+ default:
+ return REASON_TYPE;
+@@ -2119,10 +2114,12 @@ static int sanitize_ptr_alu(struct bpf_v
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+- struct bpf_reg_state *dst_reg)
++ struct bpf_reg_state *dst_reg,
++ struct bpf_insn_aux_data *tmp_aux,
++ const bool commit_window)
+ {
++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
+ struct bpf_verifier_state *vstate = env->cur_state;
+- struct bpf_insn_aux_data *aux = cur_aux(env);
+ bool off_is_neg = off_reg->smin_value < 0;
+ bool ptr_is_dst_reg = ptr_reg == dst_reg;
+ u8 opcode = BPF_OP(insn->code);
+@@ -2141,18 +2138,33 @@ static int sanitize_ptr_alu(struct bpf_v
+ if (vstate->speculative)
+ goto do_sim;
+
+- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+- alu_state |= ptr_is_dst_reg ?
+- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+-
+ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
+ if (err < 0)
+ return err;
+
++ if (commit_window) {
++ /* In commit phase we narrow the masking window based on
++ * the observed pointer move after the simulated operation.
++ */
++ alu_state = tmp_aux->alu_state;
++ alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++ } else {
++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
++ alu_state |= ptr_is_dst_reg ?
++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
++ }
++
+ err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+ if (err < 0)
+ return err;
+ do_sim:
++ /* If we're in commit phase, we're done here given we already
++ * pushed the truncated dst_reg into the speculative verification
++ * stack.
++ */
++ if (commit_window)
++ return 0;
++
+ /* Simulate and find potential out-of-bounds access under
+ * speculative execution from truncation as a result of
+ * masking when off was not within expected range. If off
+@@ -2262,6 +2274,7 @@ static int adjust_ptr_min_max_vals(struc
+ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
++ struct bpf_insn_aux_data tmp_aux = {};
+ u8 opcode = BPF_OP(insn->code);
+ u32 dst = insn->dst_reg;
+ int ret;
+@@ -2314,12 +2327,15 @@ static int adjust_ptr_min_max_vals(struc
+ !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+ return -EINVAL;
+
+- switch (opcode) {
+- case BPF_ADD:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
++ &tmp_aux, false);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
++ switch (opcode) {
++ case BPF_ADD:
+ /* We can take a fixed offset as long as it doesn't overflow
+ * the s32 'off' field
+ */
+@@ -2370,10 +2386,6 @@ static int adjust_ptr_min_max_vals(struc
+ }
+ break;
+ case BPF_SUB:
+- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg);
+- if (ret < 0)
+- return sanitize_err(env, insn, ret, off_reg, dst_reg);
+-
+ if (dst_reg == off_reg) {
+ /* scalar -= pointer. Creates an unknown scalar */
+ if (!env->allow_ptr_leaks)
+@@ -2463,6 +2475,12 @@ static int adjust_ptr_min_max_vals(struc
+
+ if (sanitize_check_bounds(env, insn, dst_reg) < 0)
+ return -EACCES;
++ if (sanitize_needed(opcode)) {
++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
++ &tmp_aux, true);
++ if (ret < 0)
++ return sanitize_err(env, insn, ret, off_reg, dst_reg);
++ }
+
+ return 0;
+ }
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:48 +0000
+Subject: bpf: Update selftests to reflect new error states
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-10-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit d7a5091351756d0ae8e63134313c455624e36a13 upstream.
+
+Update various selftest error messages:
+
+ * The 'Rx tried to sub from different maps, paths, or prohibited types'
+ is reworked into more specific/differentiated error messages for better
+ guidance.
+
+ * The change into 'value -4294967168 makes map_value pointer be out of
+ bounds' is due to moving the mixed bounds check into the speculation
+ handling and thus occurring slightly later than above mentioned sanity
+ check.
+
+ * The change into 'math between map_value pointer and register with
+ unbounded min value' is similarly due to register sanity check coming
+ before the mixed bounds check.
+
+ * The case of 'map access: known scalar += value_ptr from different maps'
+ now loads fine given masks are the same from the different paths (despite
+ max map value size being different).
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+[fllinden@amazon.com - 4.14 backport, account for split test_verifier and
+different / missing tests]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_verifier.c | 34 +++++++++-------------------
+ 1 file changed, 12 insertions(+), 22 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -2243,7 +2243,7 @@ static struct bpf_test tests[] = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+@@ -6220,7 +6220,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6245,7 +6244,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6272,7 +6270,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6298,7 +6295,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6347,7 +6343,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6419,7 +6414,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6471,7 +6465,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6499,7 +6492,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6526,7 +6518,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6556,7 +6547,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+ {
+@@ -6615,7 +6605,6 @@ static struct bpf_test tests[] = {
+ },
+ .fixup_map1 = { 3 },
+ .errstr = "unbounded min value",
+- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ .result_unpriv = REJECT,
+ },
+@@ -7779,7 +7768,7 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
+ },
+@@ -7794,7 +7783,7 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+@@ -7806,22 +7795,23 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
+ },
+ {
+ "check deducing bounds from const, 4",
+ .insns = {
++ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R6 has pointer with unsupported alu operation",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+@@ -7833,7 +7823,7 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
+ },
+@@ -7846,7 +7836,7 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
+ },
+@@ -7860,7 +7850,7 @@ static struct bpf_test tests[] = {
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
+ },
+@@ -7874,7 +7864,7 @@ static struct bpf_test tests[] = {
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "dereference of modified ctx ptr",
+ .result = REJECT,
+ },
+@@ -7886,7 +7876,7 @@ static struct bpf_test tests[] = {
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
++ .errstr_unpriv = "R1 has pointer with unsupported alu operation",
+ .errstr = "R0 tried to subtract pointer from scalar",
+ .result = REJECT,
+ },
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:50 +0000
+Subject: bpf/verifier: disallow pointer subtraction
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-12-fllinden@amazon.com>
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit dd066823db2ac4e22f721ec85190817b58059a54 upstream.
+
+Subtraction of pointers was accidentally allowed for unpriv programs
+by commit 82abbf8d2fc4. Revert that part of commit.
+
+Fixes: 82abbf8d2fc4 ("bpf: do not allow root to mangle valid pointers")
+Reported-by: Jann Horn <jannh@google.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+[fllinden@amazon.com: backport to 4.14]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2754,7 +2754,7 @@ static int adjust_reg_min_max_vals(struc
+ * an arbitrary scalar. Disallow all math except
+ * pointer subtraction
+ */
+- if (opcode == BPF_SUB){
++ if (opcode == BPF_SUB && env->allow_ptr_leaks) {
+ mark_reg_unknown(regs, insn->dst_reg);
+ return 0;
+ }
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:54 +0000
+Subject: bpf: Wrap aux data inside bpf_sanitize_info container
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-16-fllinden@amazon.com>
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+commit 3d0220f6861d713213b015b582e9f21e5b28d2e0 upstream.
+
+Add a container structure struct bpf_sanitize_info which holds
+the current aux info, and update call-sites to sanitize_ptr_alu()
+to pass it in. This is needed for passing in additional state
+later on.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Piotr Krysiuk <piotras@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2110,15 +2110,19 @@ static bool sanitize_needed(u8 opcode)
+ return opcode == BPF_ADD || opcode == BPF_SUB;
+ }
+
++struct bpf_sanitize_info {
++ struct bpf_insn_aux_data aux;
++};
++
+ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ const struct bpf_reg_state *ptr_reg,
+ const struct bpf_reg_state *off_reg,
+ struct bpf_reg_state *dst_reg,
+- struct bpf_insn_aux_data *tmp_aux,
++ struct bpf_sanitize_info *info,
+ const bool commit_window)
+ {
+- struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
+ struct bpf_verifier_state *vstate = env->cur_state;
+ bool off_is_imm = tnum_is_const(off_reg->var_off);
+ bool off_is_neg = off_reg->smin_value < 0;
+@@ -2147,8 +2151,8 @@ static int sanitize_ptr_alu(struct bpf_v
+ /* In commit phase we narrow the masking window based on
+ * the observed pointer move after the simulated operation.
+ */
+- alu_state = tmp_aux->alu_state;
+- alu_limit = abs(tmp_aux->alu_limit - alu_limit);
++ alu_state = info->aux.alu_state;
++ alu_limit = abs(info->aux.alu_limit - alu_limit);
+ } else {
+ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+ alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+@@ -2276,7 +2280,7 @@ static int adjust_ptr_min_max_vals(struc
+ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
+ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
+ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
+- struct bpf_insn_aux_data tmp_aux = {};
++ struct bpf_sanitize_info info = {};
+ u8 opcode = BPF_OP(insn->code);
+ u32 dst = insn->dst_reg;
+ int ret;
+@@ -2327,7 +2331,7 @@ static int adjust_ptr_min_max_vals(struc
+
+ if (sanitize_needed(opcode)) {
+ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+- &tmp_aux, false);
++ &info, false);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ }
+@@ -2468,7 +2472,7 @@ static int adjust_ptr_min_max_vals(struc
+ return -EACCES;
+ if (sanitize_needed(opcode)) {
+ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+- &tmp_aux, true);
++ &info, true);
+ if (ret < 0)
+ return sanitize_err(env, insn, ret, off_reg, dst_reg);
+ }
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:51 +0000
+Subject: selftests/bpf: fix test_align
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-13-fllinden@amazon.com>
+
+From: Alexei Starovoitov <ast@fb.com>
+
+commit 2b36047e7889b7efee22c11e17f035f721855731 upstream.
+
+since commit 82abbf8d2fc4 the verifier rejects the bit-wise
+arithmetic on pointers earlier.
+The test 'dubious pointer arithmetic' now has less output to match on.
+Adjust it.
+
+Fixes: 82abbf8d2fc4 ("bpf: do not allow root to mangle valid pointers")
+Reported-by: kernel test robot <xiaolong.ye@intel.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_align.c | 22 +---------------------
+ 1 file changed, 1 insertion(+), 21 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_align.c
++++ b/tools/testing/selftests/bpf/test_align.c
+@@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
+ .result = REJECT,
+ .matches = {
+ {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
+- /* ptr & 0x40 == either 0 or 0x40 */
+- {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
+- /* ptr << 2 == unknown, (4n) */
+- {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+- /* (4n) + 14 == (4n+2). We blow our bounds, because
+- * the add could overflow.
+- */
+- {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+- /* Checked s>=0 */
+- {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+- /* packet pointer + nonnegative (4n+2) */
+- {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+- {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+- /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
+- * We checked the bounds, but it might have been able
+- * to overflow if the packet pointer started in the
+- * upper half of the address space.
+- * So we did not get a 'range' on R6, and the access
+- * attempt will fail.
+- */
+- {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ /* R5 bitwise operator &= on pointer prohibited */
+ }
+ },
+ {
--- /dev/null
+From foo@baz Tue Jun 8 04:46:04 PM CEST 2021
+From: Frank van der Linden <fllinden@amazon.com>
+Date: Mon, 31 May 2021 18:25:52 +0000
+Subject: selftests/bpf: make 'dubious pointer arithmetic' test useful
+To: <stable@vger.kernel.org>
+Cc: <bpf@vger.kernel.org>, <daniel@iogearbox.net>
+Message-ID: <20210531182556.25277-14-fllinden@amazon.com>
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit 31e95b61e172144bb2b626a291db1bdc0769275b upstream.
+
+mostly revert the previous workaround and make
+'dubious pointer arithmetic' test useful again.
+Use (ptr - ptr) << const instead of ptr << const to generate large scalar.
+The rest stays as before commit 2b36047e7889.
+
+Fixes: 2b36047e7889 ("selftests/bpf: fix test_align")
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+[fllinden@amazon.com: adjust for 4.14 (no liveness of regs in output)]
+Signed-off-by: Frank van der Linden <fllinden@amazon.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/test_align.c | 30 +++++++++++++++++++++++-------
+ 1 file changed, 23 insertions(+), 7 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_align.c
++++ b/tools/testing/selftests/bpf/test_align.c
+@@ -446,11 +446,9 @@ static struct bpf_align_test tests[] = {
+ .insns = {
+ PREP_PKT_POINTERS,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+- /* ptr & const => unknown & const */
+- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
+- /* ptr << const => unknown << const */
+- BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
++ /* (ptr - ptr) << 2 */
++ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
++ BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
+ /* We have a (4n) value. Let's make a packet offset
+ * out of it. First add 14, to make it a (4n+2)
+@@ -473,8 +471,26 @@ static struct bpf_align_test tests[] = {
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .matches = {
+- {4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
+- /* R5 bitwise operator &= on pointer prohibited */
++ {4, "R5=pkt_end(id=0,off=0,imm=0)"},
++ /* (ptr - ptr) << 2 == unknown, (4n) */
++ {6, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
++ /* (4n) + 14 == (4n+2). We blow our bounds, because
++ * the add could overflow.
++ */
++ {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
++ /* Checked s>=0 */
++ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ /* packet pointer + nonnegative (4n+2) */
++ {11, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ {13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
++ /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
++ * We checked the bounds, but it might have been able
++ * to overflow if the packet pointer started in the
++ * upper half of the address space.
++ * So we did not get a 'range' on R6, and the access
++ * attempt will fail.
++ */
++ {15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ }
+ },
+ {
btrfs-fix-error-handling-in-btrfs_del_csums.patch
btrfs-fixup-error-handling-in-fixup_inode_link_counts.patch
mm-hugetlb-fix-simple-resv_huge_pages-underflow-on-u.patch
+bpf-selftests-fix-up-some-test_verifier-cases-for-unprivileged.patch
+bpf-move-off_reg-into-sanitize_ptr_alu.patch
+bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-all-types.patch
+bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch
+bpf-improve-verifier-error-messages-for-users.patch
+bpf-refactor-and-streamline-bounds-check-into-helper.patch
+bpf-move-sanitize_val_alu-out-of-op-switch.patch
+bpf-tighten-speculative-pointer-arithmetic-mask.patch
+bpf-update-selftests-to-reflect-new-error-states.patch
+bpf-do-not-allow-root-to-mangle-valid-pointers.patch
+bpf-verifier-disallow-pointer-subtraction.patch
+selftests-bpf-fix-test_align.patch
+selftests-bpf-make-dubious-pointer-arithmetic-test-useful.patch
+bpf-fix-leakage-of-uninitialized-bpf-stack-under-speculation.patch
+bpf-wrap-aux-data-inside-bpf_sanitize_info-container.patch
+bpf-fix-mask-direction-swap-upon-off-reg-sign-change.patch
+bpf-no-need-to-simulate-speculative-domain-for-immediates.patch