From: Greg Kroah-Hartman Date: Sun, 30 May 2021 12:32:11 +0000 (+0200) Subject: 4.19-stable patches X-Git-Tag: v4.4.271~77 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=172f17bb3b92d8c4c9f9cf8dc17891ed2d3e9872;p=thirdparty%2Fkernel%2Fstable-queue.git 4.19-stable patches added patches: bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-all-types.patch bpf-extend-is_branch_taken-to-registers.patch bpf-fix-leakage-of-uninitialized-bpf-stack-under-speculation.patch bpf-fix-mask-direction-swap-upon-off-reg-sign-change.patch bpf-fix-up-selftests-after-backports-were-fixed.patch bpf-improve-verifier-error-messages-for-users.patch bpf-move-off_reg-into-sanitize_ptr_alu.patch bpf-move-sanitize_val_alu-out-of-op-switch.patch bpf-no-need-to-simulate-speculative-domain-for-immediates.patch bpf-refactor-and-streamline-bounds-check-into-helper.patch bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch bpf-selftests-fix-up-some-test_verifier-cases-for-unprivileged.patch bpf-test_verifier-bpf_get_stack-return-value-add-0.patch bpf-test_verifier-switch-bpf_get_stack-s-0-s-r8-test.patch bpf-tighten-speculative-pointer-arithmetic-mask.patch bpf-update-selftests-to-reflect-new-error-states.patch bpf-wrap-aux-data-inside-bpf_sanitize_info-container.patch net-usb-fix-memory-leak-in-smsc75xx_bind.patch selftests-bpf-add-selftest-part-of-bpf-improve-verifier-branch-analysis.patch selftests-bpf-test-narrow-loads-with-off-0-in-test_verifier.patch spi-gpio-don-t-leak-spi-master-in-probe-error-path.patch spi-mt7621-disable-clock-in-probe-error-path.patch spi-mt7621-don-t-leak-spi-master-in-probe-error-path.patch --- diff --git a/queue-4.19/bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-all-types.patch b/queue-4.19/bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-all-types.patch new file mode 100644 index 00000000000..52df8af83dc --- /dev/null +++ b/queue-4.19/bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-all-types.patch @@ -0,0 +1,89 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:00 +0300 +Subject: bpf: Ensure off_reg has no mixed signed bounds for all types +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-10-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit 24c109bb1537c12c02aeed2d51a347b4d6a9b76e upstream. + +The mixed signed bounds check really belongs into retrieve_ptr_limit() +instead of outside of it in adjust_ptr_min_max_vals(). The reason is +that this check is not tied to PTR_TO_MAP_VALUE only, but to all pointer +types that we handle in retrieve_ptr_limit() and given errors from the latter +propagate back to adjust_ptr_min_max_vals() and lead to rejection of the +program, it's a better place to reside to avoid anything slipping through +for future types. The reason why we must reject such off_reg is that we +otherwise would not be able to derive a mask, see details in 9d7eceede769 +("bpf: restrict unknown scalars of mixed signed bounds for unprivileged"). 
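For reference, the condition being centralized can be illustrated in plain C (illustration only, not the kernel code): a register has "mixed signed bounds" when its possible values straddle zero, so neither a masking direction nor a limit can be derived from it.

  #include <stdbool.h>

  /* Mirrors the check now done in retrieve_ptr_limit(): smin_value and
   * smax_value disagree about the sign, i.e. the unknown scalar may be
   * either negative or non-negative at runtime.
   */
  bool mixed_signed_bounds(long long smin, long long smax)
  {
      return (smin < 0) != (smax < 0);
  }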
+ +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +[fllinden@amazon.com: backport to 5.4] +Signed-off-by: Frank van der Linden +Signed-off-by: Greg Kroah-Hartman +[OP: backport to 4.19] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 18 +++++++++--------- + 1 file changed, 9 insertions(+), 9 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2730,12 +2730,18 @@ static struct bpf_insn_aux_data *cur_aux + } + + static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, +- u32 *ptr_limit, u8 opcode, bool off_is_neg) ++ const struct bpf_reg_state *off_reg, ++ u32 *ptr_limit, u8 opcode) + { ++ bool off_is_neg = off_reg->smin_value < 0; + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || + (opcode == BPF_SUB && !off_is_neg); + u32 off, max; + ++ if (!tnum_is_const(off_reg->var_off) && ++ (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) ++ return -EACCES; ++ + switch (ptr_reg->type) { + case PTR_TO_STACK: + /* Offset 0 is out-of-bounds, but acceptable start for the +@@ -2826,7 +2832,7 @@ static int sanitize_ptr_alu(struct bpf_v + alu_state |= ptr_is_dst_reg ? + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + +- err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg); ++ err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode); + if (err < 0) + return err; + +@@ -2871,8 +2877,8 @@ static int adjust_ptr_min_max_vals(struc + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; +- u32 dst = insn->dst_reg, src = insn->src_reg; + u8 opcode = BPF_OP(insn->code); ++ u32 dst = insn->dst_reg; + int ret; + + dst_reg = ®s[dst]; +@@ -2909,12 +2915,6 @@ static int adjust_ptr_min_max_vals(struc + dst); + return -EACCES; + } +- if (ptr_reg->type == PTR_TO_MAP_VALUE && +- !env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { +- verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", +- off_reg == dst_reg ? dst : src); +- return -EACCES; +- } + + /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. + * The id may be overwritten later if we create a new variable offset. diff --git a/queue-4.19/bpf-extend-is_branch_taken-to-registers.patch b/queue-4.19/bpf-extend-is_branch_taken-to-registers.patch new file mode 100644 index 00000000000..c0ff7e62d14 --- /dev/null +++ b/queue-4.19/bpf-extend-is_branch_taken-to-registers.patch @@ -0,0 +1,87 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:56 +0300 +Subject: bpf: extend is_branch_taken to registers +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-6-ovidiu.panait@windriver.com> + +From: Alexei Starovoitov + +commit fb8d251ee2a6bf4d7f4af5548e9c8f4fb5f90402 upstream + +This patch extends is_branch_taken() logic from JMP+K instructions +to JMP+X instructions. +Conditional branches are often done when src and dst registers +contain known scalars. In such case the verifier can follow +the branch that is going to be taken when program executes. +That speeds up the verification and is essential feature to support +bounded loops. 
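The idea can be sketched with a toy model (names and enum values are made up for illustration; this is not the in-kernel is_branch_taken()): once src is a known constant as well, the same constant folding already used for JMP+K applies to JMP+X.

  #include <stdint.h>

  enum toy_cmp { TOY_JEQ, TOY_JGT, TOY_JSGT };

  /* Both operands known: the branch outcome is a static fact, so the
   * verifier only needs to walk the path that will actually execute.
   */
  int toy_branch_taken(uint64_t dst, uint64_t src, enum toy_cmp op)
  {
      switch (op) {
      case TOY_JEQ:  return dst == src;
      case TOY_JGT:  return dst > src;                   /* unsigned */
      case TOY_JSGT: return (int64_t)dst > (int64_t)src; /* signed   */
      }
      return -1;  /* not handled here: explore both branches */
  }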
+ +Signed-off-by: Alexei Starovoitov +Acked-by: Andrii Nakryiko +Signed-off-by: Daniel Borkmann +[OP: drop is_jmp32 parameter from is_branch_taken() calls and + adjust context] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 32 ++++++++++++++++++-------------- + 1 file changed, 18 insertions(+), 14 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -4127,8 +4127,9 @@ static int check_cond_jmp_op(struct bpf_ + struct bpf_verifier_state *this_branch = env->cur_state; + struct bpf_verifier_state *other_branch; + struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; +- struct bpf_reg_state *dst_reg, *other_branch_regs; ++ struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; + u8 opcode = BPF_OP(insn->code); ++ int pred = -1; + int err; + + if (opcode > BPF_JSLE) { +@@ -4152,6 +4153,7 @@ static int check_cond_jmp_op(struct bpf_ + insn->src_reg); + return -EACCES; + } ++ src_reg = ®s[insn->src_reg]; + } else { + if (insn->src_reg != BPF_REG_0) { + verbose(env, "BPF_JMP uses reserved fields\n"); +@@ -4166,19 +4168,21 @@ static int check_cond_jmp_op(struct bpf_ + + dst_reg = ®s[insn->dst_reg]; + +- if (BPF_SRC(insn->code) == BPF_K) { +- int pred = is_branch_taken(dst_reg, insn->imm, opcode); +- +- if (pred == 1) { +- /* only follow the goto, ignore fall-through */ +- *insn_idx += insn->off; +- return 0; +- } else if (pred == 0) { +- /* only follow fall-through branch, since +- * that's where the program will go +- */ +- return 0; +- } ++ if (BPF_SRC(insn->code) == BPF_K) ++ pred = is_branch_taken(dst_reg, insn->imm, opcode); ++ else if (src_reg->type == SCALAR_VALUE && ++ tnum_is_const(src_reg->var_off)) ++ pred = is_branch_taken(dst_reg, src_reg->var_off.value, ++ opcode); ++ if (pred == 1) { ++ /* only follow the goto, ignore fall-through */ ++ *insn_idx += insn->off; ++ return 0; ++ } else if (pred == 0) { ++ /* only follow fall-through branch, since ++ * that's where the program will go ++ */ ++ return 0; + } + + other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, diff --git a/queue-4.19/bpf-fix-leakage-of-uninitialized-bpf-stack-under-speculation.patch b/queue-4.19/bpf-fix-leakage-of-uninitialized-bpf-stack-under-speculation.patch new file mode 100644 index 00000000000..510936f9817 --- /dev/null +++ b/queue-4.19/bpf-fix-leakage-of-uninitialized-bpf-stack-under-speculation.patch @@ -0,0 +1,138 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:07 +0300 +Subject: bpf: Fix leakage of uninitialized bpf stack under speculation +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-17-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit 801c6058d14a82179a7ee17a4b532cac6fad067f upstream. + +The current implemented mechanisms to mitigate data disclosure under +speculation mainly address stack and map value oob access from the +speculative domain. However, Piotr discovered that uninitialized BPF +stack is not protected yet, and thus old data from the kernel stack, +potentially including addresses of kernel structures, could still be +extracted from that 512 bytes large window. 
The BPF stack is special +compared to map values since it's not zero initialized for every +program invocation, whereas map values /are/ zero initialized upon +their initial allocation and thus cannot leak any prior data in either +domain. In the non-speculative domain, the verifier ensures that every +stack slot read must have a prior stack slot write by the BPF program +to avoid such data leaking issue. + +However, this is not enough: for example, when the pointer arithmetic +operation moves the stack pointer from the last valid stack offset to +the first valid offset, the sanitation logic allows for any intermediate +offsets during speculative execution, which could then be used to +extract any restricted stack content via side-channel. + +Given for unprivileged stack pointer arithmetic the use of unknown +but bounded scalars is generally forbidden, we can simply turn the +register-based arithmetic operation into an immediate-based arithmetic +operation without the need for masking. This also gives the benefit +of reducing the needed instructions for the operation. Given after +the work in 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic +mask"), the aux->alu_limit already holds the final immediate value for +the offset register with the known scalar. Thus, a simple mov of the +immediate to AX register with using AX as the source for the original +instruction is sufficient and possible now in this case. + +Reported-by: Piotr Krysiuk +Signed-off-by: Daniel Borkmann +Tested-by: Piotr Krysiuk +Reviewed-by: Piotr Krysiuk +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/bpf_verifier.h | 5 +++-- + kernel/bpf/verifier.c | 27 +++++++++++++++++---------- + 2 files changed, 20 insertions(+), 12 deletions(-) + +--- a/include/linux/bpf_verifier.h ++++ b/include/linux/bpf_verifier.h +@@ -144,10 +144,11 @@ struct bpf_verifier_state_list { + }; + + /* Possible states for alu_state member. */ +-#define BPF_ALU_SANITIZE_SRC 1U +-#define BPF_ALU_SANITIZE_DST 2U ++#define BPF_ALU_SANITIZE_SRC (1U << 0) ++#define BPF_ALU_SANITIZE_DST (1U << 1) + #define BPF_ALU_NEG_VALUE (1U << 2) + #define BPF_ALU_NON_POINTER (1U << 3) ++#define BPF_ALU_IMMEDIATE (1U << 4) + #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ + BPF_ALU_SANITIZE_DST) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2825,6 +2825,7 @@ static int sanitize_ptr_alu(struct bpf_v + { + struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux; + struct bpf_verifier_state *vstate = env->cur_state; ++ bool off_is_imm = tnum_is_const(off_reg->var_off); + bool off_is_neg = off_reg->smin_value < 0; + bool ptr_is_dst_reg = ptr_reg == dst_reg; + u8 opcode = BPF_OP(insn->code); +@@ -2855,6 +2856,7 @@ static int sanitize_ptr_alu(struct bpf_v + alu_limit = abs(tmp_aux->alu_limit - alu_limit); + } else { + alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; ++ alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; + alu_state |= ptr_is_dst_reg ? 
+ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + } +@@ -6172,7 +6174,7 @@ static int fixup_bpf_calls(struct bpf_ve + const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; + struct bpf_insn insn_buf[16]; + struct bpf_insn *patch = &insn_buf[0]; +- bool issrc, isneg; ++ bool issrc, isneg, isimm; + u32 off_reg; + + aux = &env->insn_aux_data[i + delta]; +@@ -6183,16 +6185,21 @@ static int fixup_bpf_calls(struct bpf_ve + isneg = aux->alu_state & BPF_ALU_NEG_VALUE; + issrc = (aux->alu_state & BPF_ALU_SANITIZE) == + BPF_ALU_SANITIZE_SRC; ++ isimm = aux->alu_state & BPF_ALU_IMMEDIATE; + + off_reg = issrc ? insn->src_reg : insn->dst_reg; +- if (isneg) +- *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); +- *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); +- *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); +- *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); +- *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); +- *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); +- *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); ++ if (isimm) { ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); ++ } else { ++ if (isneg) ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); ++ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); ++ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); ++ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); ++ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); ++ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); ++ } + if (!issrc) + *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); + insn->src_reg = BPF_REG_AX; +@@ -6200,7 +6207,7 @@ static int fixup_bpf_calls(struct bpf_ve + insn->code = insn->code == code_add ? + code_sub : code_add; + *patch++ = *insn; +- if (issrc && isneg) ++ if (issrc && isneg && !isimm) + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); + cnt = patch - insn_buf; + diff --git a/queue-4.19/bpf-fix-mask-direction-swap-upon-off-reg-sign-change.patch b/queue-4.19/bpf-fix-mask-direction-swap-upon-off-reg-sign-change.patch new file mode 100644 index 00000000000..2533a4e75c5 --- /dev/null +++ b/queue-4.19/bpf-fix-mask-direction-swap-upon-off-reg-sign-change.patch @@ -0,0 +1,78 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:09 +0300 +Subject: bpf: Fix mask direction swap upon off reg sign change +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-19-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit bb01a1bba579b4b1c5566af24d95f1767859771e upstream + +Masking direction as indicated via mask_to_left is considered to be +calculated once and then used to derive pointer limits. Thus, this +needs to be placed into bpf_sanitize_info instead so we can pass it +to sanitize_ptr_alu() call after the pointer move. Piotr noticed a +corner case where the off reg causes masking direction change which +then results in an incorrect final aux->alu_limit. 
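As a plain-C illustration (not the kernel code, and simplified to the ADD/SUB cases the sanitizer handles), the direction that must be captured once, before the pointer move, is:

  #include <stdbool.h>

  /* Mirrors: (opcode == BPF_ADD && off_is_neg) ||
   *          (opcode == BPF_SUB && !off_is_neg)
   * If off_reg's sign estimate changes after the ALU op, recomputing this
   * would flip the direction and yield a wrong alu_limit.
   */
  bool mask_to_left(bool opcode_is_add, bool off_is_neg)
  {
      return (opcode_is_add && off_is_neg) ||
             (!opcode_is_add && !off_is_neg);
  }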
+ +Fixes: 7fedb63a8307 ("bpf: Tighten speculative pointer arithmetic mask") +Reported-by: Piotr Krysiuk +Signed-off-by: Daniel Borkmann +Reviewed-by: Piotr Krysiuk +Acked-by: Alexei Starovoitov +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 22 ++++++++++++---------- + 1 file changed, 12 insertions(+), 10 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2738,18 +2738,10 @@ enum { + }; + + static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, +- const struct bpf_reg_state *off_reg, +- u32 *alu_limit, u8 opcode) ++ u32 *alu_limit, bool mask_to_left) + { +- bool off_is_neg = off_reg->smin_value < 0; +- bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || +- (opcode == BPF_SUB && !off_is_neg); + u32 max = 0, ptr_limit = 0; + +- if (!tnum_is_const(off_reg->var_off) && +- (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) +- return REASON_BOUNDS; +- + switch (ptr_reg->type) { + case PTR_TO_STACK: + /* Offset 0 is out-of-bounds, but acceptable start for the +@@ -2817,6 +2809,7 @@ static bool sanitize_needed(u8 opcode) + + struct bpf_sanitize_info { + struct bpf_insn_aux_data aux; ++ bool mask_to_left; + }; + + static int sanitize_ptr_alu(struct bpf_verifier_env *env, +@@ -2848,7 +2841,16 @@ static int sanitize_ptr_alu(struct bpf_v + if (vstate->speculative) + goto do_sim; + +- err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode); ++ if (!commit_window) { ++ if (!tnum_is_const(off_reg->var_off) && ++ (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) ++ return REASON_BOUNDS; ++ ++ info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || ++ (opcode == BPF_SUB && !off_is_neg); ++ } ++ ++ err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); + if (err < 0) + return err; + diff --git a/queue-4.19/bpf-fix-up-selftests-after-backports-were-fixed.patch b/queue-4.19/bpf-fix-up-selftests-after-backports-were-fixed.patch new file mode 100644 index 00000000000..03df2e5b350 --- /dev/null +++ b/queue-4.19/bpf-fix-up-selftests-after-backports-were-fixed.patch @@ -0,0 +1,172 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:52 +0300 +Subject: bpf: fix up selftests after backports were fixed +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-2-ovidiu.panait@windriver.com> + +From: Ovidiu Panait + +After the backport of the changes to fix CVE 2019-7308, the +selftests also need to be fixed up, as was done originally +in mainline 80c9b2fae87b ("bpf: add various test cases to selftests"). + +This is a backport of upstream commit 80c9b2fae87b ("bpf: add various test +cases to selftests") adapted to 4.19 in order to fix the +selftests that began to fail after CVE-2019-7308 fixes. 
+ +Suggested-by: Frank van der Linden +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/test_verifier.c | 19 +++++++++++++++++++ + 1 file changed, 19 insertions(+) + +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -2448,6 +2448,7 @@ static struct bpf_test tests[] = { + }, + .result = REJECT, + .errstr = "invalid stack off=-79992 size=8", ++ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + }, + { + "PTR_TO_STACK store/load - out of bounds high", +@@ -2844,6 +2845,8 @@ static struct bpf_test tests[] = { + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", ++ .result_unpriv = REJECT, + .result = ACCEPT, + }, + { +@@ -7457,6 +7460,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7481,6 +7485,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7507,6 +7512,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7532,6 +7538,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7580,6 +7587,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7651,6 +7659,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7702,6 +7711,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7729,6 +7739,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7755,6 +7766,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7784,6 +7796,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7814,6 +7827,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 4 }, + .errstr = "R0 invalid mem access 'inv'", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7842,6 +7856,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + .result_unpriv = REJECT, + }, +@@ -7894,6 +7909,7 @@ static struct bpf_test 
tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", ++ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -8266,6 +8282,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "pointer offset 1073741822", ++ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .result = REJECT + }, + { +@@ -8287,6 +8304,7 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "pointer offset -1073741822", ++ .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", + .result = REJECT + }, + { +@@ -8458,6 +8476,7 @@ static struct bpf_test tests[] = { + BPF_EXIT_INSN() + }, + .errstr = "fp pointer offset 1073741822", ++ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result = REJECT + }, + { diff --git a/queue-4.19/bpf-improve-verifier-error-messages-for-users.patch b/queue-4.19/bpf-improve-verifier-error-messages-for-users.patch new file mode 100644 index 00000000000..cfe140daa34 --- /dev/null +++ b/queue-4.19/bpf-improve-verifier-error-messages-for-users.patch @@ -0,0 +1,186 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:02 +0300 +Subject: bpf: Improve verifier error messages for users +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-12-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit a6aaece00a57fa6f22575364b3903dfbccf5345d upstream + +Consolidate all error handling and provide more user-friendly error messages +from sanitize_ptr_alu() and sanitize_val_alu(). + +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +[fllinden@amazon.com: backport to 5.4] +Signed-off-by: Frank van der Linden +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 84 ++++++++++++++++++++++++++++++++++++-------------- + 1 file changed, 62 insertions(+), 22 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2729,6 +2729,14 @@ static struct bpf_insn_aux_data *cur_aux + return &env->insn_aux_data[env->insn_idx]; + } + ++enum { ++ REASON_BOUNDS = -1, ++ REASON_TYPE = -2, ++ REASON_PATHS = -3, ++ REASON_LIMIT = -4, ++ REASON_STACK = -5, ++}; ++ + static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, + const struct bpf_reg_state *off_reg, + u32 *alu_limit, u8 opcode) +@@ -2740,7 +2748,7 @@ static int retrieve_ptr_limit(const stru + + if (!tnum_is_const(off_reg->var_off) && + (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) +- return -EACCES; ++ return REASON_BOUNDS; + + switch (ptr_reg->type) { + case PTR_TO_STACK: +@@ -2764,11 +2772,11 @@ static int retrieve_ptr_limit(const stru + } + break; + default: +- return -EINVAL; ++ return REASON_TYPE; + } + + if (ptr_limit >= max) +- return -ERANGE; ++ return REASON_LIMIT; + *alu_limit = ptr_limit; + return 0; + } +@@ -2788,7 +2796,7 @@ static int update_alu_sanitation_state(s + if (aux->alu_state && + (aux->alu_state != alu_state || + aux->alu_limit != alu_limit)) +- return -EACCES; ++ return REASON_PATHS; + + /* Corresponding fixup done in fixup_bpf_calls(). 
*/ + aux->alu_state = alu_state; +@@ -2861,7 +2869,46 @@ do_sim: + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); + if (!ptr_is_dst_reg && ret) + *dst_reg = tmp; +- return !ret ? -EFAULT : 0; ++ return !ret ? REASON_STACK : 0; ++} ++ ++static int sanitize_err(struct bpf_verifier_env *env, ++ const struct bpf_insn *insn, int reason, ++ const struct bpf_reg_state *off_reg, ++ const struct bpf_reg_state *dst_reg) ++{ ++ static const char *err = "pointer arithmetic with it prohibited for !root"; ++ const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; ++ u32 dst = insn->dst_reg, src = insn->src_reg; ++ ++ switch (reason) { ++ case REASON_BOUNDS: ++ verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", ++ off_reg == dst_reg ? dst : src, err); ++ break; ++ case REASON_TYPE: ++ verbose(env, "R%d has pointer with unsupported alu operation, %s\n", ++ off_reg == dst_reg ? src : dst, err); ++ break; ++ case REASON_PATHS: ++ verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", ++ dst, op, err); ++ break; ++ case REASON_LIMIT: ++ verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", ++ dst, op, err); ++ break; ++ case REASON_STACK: ++ verbose(env, "R%d could not be pushed for speculative verification, %s\n", ++ dst, err); ++ break; ++ default: ++ verbose(env, "verifier internal error: unknown reason (%d)\n", ++ reason); ++ break; ++ } ++ ++ return -EACCES; + } + + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. +@@ -2934,10 +2981,9 @@ static int adjust_ptr_min_max_vals(struc + switch (opcode) { + case BPF_ADD: + ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg); +- if (ret < 0) { +- verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst); +- return ret; +- } ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, off_reg, dst_reg); ++ + /* We can take a fixed offset as long as it doesn't overflow + * the s32 'off' field + */ +@@ -2989,10 +3035,9 @@ static int adjust_ptr_min_max_vals(struc + break; + case BPF_SUB: + ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg); +- if (ret < 0) { +- verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst); +- return ret; +- } ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, off_reg, dst_reg); ++ + if (dst_reg == off_reg) { + /* scalar -= pointer. Creates an unknown scalar */ + verbose(env, "R%d tried to subtract pointer from scalar\n", +@@ -3109,7 +3154,6 @@ static int adjust_scalar_min_max_vals(st + s64 smin_val, smax_val; + u64 umin_val, umax_val; + u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 
64 : 32; +- u32 dst = insn->dst_reg; + int ret; + + if (insn_bitness == 32) { +@@ -3146,10 +3190,8 @@ static int adjust_scalar_min_max_vals(st + switch (opcode) { + case BPF_ADD: + ret = sanitize_val_alu(env, insn); +- if (ret < 0) { +- verbose(env, "R%d tried to add from different pointers or scalars\n", dst); +- return ret; +- } ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, NULL, NULL); + if (signed_add_overflows(dst_reg->smin_value, smin_val) || + signed_add_overflows(dst_reg->smax_value, smax_val)) { + dst_reg->smin_value = S64_MIN; +@@ -3170,10 +3212,8 @@ static int adjust_scalar_min_max_vals(st + break; + case BPF_SUB: + ret = sanitize_val_alu(env, insn); +- if (ret < 0) { +- verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); +- return ret; +- } ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, NULL, NULL); + if (signed_sub_overflows(dst_reg->smin_value, smax_val) || + signed_sub_overflows(dst_reg->smax_value, smin_val)) { + /* Overflow possible, we know nothing */ diff --git a/queue-4.19/bpf-move-off_reg-into-sanitize_ptr_alu.patch b/queue-4.19/bpf-move-off_reg-into-sanitize_ptr_alu.patch new file mode 100644 index 00000000000..010b9d59381 --- /dev/null +++ b/queue-4.19/bpf-move-off_reg-into-sanitize_ptr_alu.patch @@ -0,0 +1,60 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:59 +0300 +Subject: bpf: Move off_reg into sanitize_ptr_alu +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-9-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit 6f55b2f2a1178856c19bbce2f71449926e731914 upstream. + +Small refactor to drag off_reg into sanitize_ptr_alu(), so we later on can +use off_reg for generalizing some of the checks for all pointer types. 
+ +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2799,11 +2799,12 @@ static int sanitize_val_alu(struct bpf_v + static int sanitize_ptr_alu(struct bpf_verifier_env *env, + struct bpf_insn *insn, + const struct bpf_reg_state *ptr_reg, +- struct bpf_reg_state *dst_reg, +- bool off_is_neg) ++ const struct bpf_reg_state *off_reg, ++ struct bpf_reg_state *dst_reg) + { + struct bpf_verifier_state *vstate = env->cur_state; + struct bpf_insn_aux_data *aux = cur_aux(env); ++ bool off_is_neg = off_reg->smin_value < 0; + bool ptr_is_dst_reg = ptr_reg == dst_reg; + u8 opcode = BPF_OP(insn->code); + u32 alu_state, alu_limit; +@@ -2927,7 +2928,7 @@ static int adjust_ptr_min_max_vals(struc + + switch (opcode) { + case BPF_ADD: +- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg); + if (ret < 0) { + verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst); + return ret; +@@ -2982,7 +2983,7 @@ static int adjust_ptr_min_max_vals(struc + } + break; + case BPF_SUB: +- ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg); + if (ret < 0) { + verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst); + return ret; diff --git a/queue-4.19/bpf-move-sanitize_val_alu-out-of-op-switch.patch b/queue-4.19/bpf-move-sanitize_val_alu-out-of-op-switch.patch new file mode 100644 index 00000000000..64c9cf29323 --- /dev/null +++ b/queue-4.19/bpf-move-sanitize_val_alu-out-of-op-switch.patch @@ -0,0 +1,69 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:04 +0300 +Subject: bpf: Move sanitize_val_alu out of op switch +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-14-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit f528819334881fd622fdadeddb3f7edaed8b7c9b upstream. + +Add a small sanitize_needed() helper function and move sanitize_val_alu() +out of the main opcode switch. In upcoming work, we'll move sanitize_ptr_alu() +as well out of its opcode switch so this helps to streamline both. 
+ +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +[fllinden@amazon.com: backported to 5.4] +Signed-off-by: Frank van der Linden +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 15 ++++++++++----- + 1 file changed, 10 insertions(+), 5 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2815,6 +2815,11 @@ static int sanitize_val_alu(struct bpf_v + return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); + } + ++static bool sanitize_needed(u8 opcode) ++{ ++ return opcode == BPF_ADD || opcode == BPF_SUB; ++} ++ + static int sanitize_ptr_alu(struct bpf_verifier_env *env, + struct bpf_insn *insn, + const struct bpf_reg_state *ptr_reg, +@@ -3207,11 +3212,14 @@ static int adjust_scalar_min_max_vals(st + return 0; + } + +- switch (opcode) { +- case BPF_ADD: ++ if (sanitize_needed(opcode)) { + ret = sanitize_val_alu(env, insn); + if (ret < 0) + return sanitize_err(env, insn, ret, NULL, NULL); ++ } ++ ++ switch (opcode) { ++ case BPF_ADD: + if (signed_add_overflows(dst_reg->smin_value, smin_val) || + signed_add_overflows(dst_reg->smax_value, smax_val)) { + dst_reg->smin_value = S64_MIN; +@@ -3231,9 +3239,6 @@ static int adjust_scalar_min_max_vals(st + dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); + break; + case BPF_SUB: +- ret = sanitize_val_alu(env, insn); +- if (ret < 0) +- return sanitize_err(env, insn, ret, NULL, NULL); + if (signed_sub_overflows(dst_reg->smin_value, smax_val) || + signed_sub_overflows(dst_reg->smax_value, smin_val)) { + /* Overflow possible, we know nothing */ diff --git a/queue-4.19/bpf-no-need-to-simulate-speculative-domain-for-immediates.patch b/queue-4.19/bpf-no-need-to-simulate-speculative-domain-for-immediates.patch new file mode 100644 index 00000000000..a7ed40c69b6 --- /dev/null +++ b/queue-4.19/bpf-no-need-to-simulate-speculative-domain-for-immediates.patch @@ -0,0 +1,48 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:10 +0300 +Subject: bpf: No need to simulate speculative domain for immediates +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-20-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit a7036191277f9fa68d92f2071ddc38c09b1e5ee5 upstream + +In 801c6058d14a ("bpf: Fix leakage of uninitialized bpf stack under +speculation") we replaced masking logic with direct loads of immediates +if the register is a known constant. Given in this case we do not apply +any masking, there is also no reason for the operation to be truncated +under the speculative domain. + +Therefore, there is also zero reason for the verifier to branch-off and +simulate this case, it only needs to do it for unknown but bounded scalars. +As a side-effect, this also enables few test cases that were previously +rejected due to simulation under zero truncation. 
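For context, what gets skipped for constants is the runtime masking itself. A rough user-space model of the sequence emitted by fixup_bpf_calls() in the non-immediate case (illustration only; BPF_REG_AX is modeled as a plain u64 and the usual gcc/clang two's-complement and arithmetic-shift behaviour is assumed):

  #include <stdint.h>

  /* Clamp the (already sign-normalized) runtime offset: the result is
   * off when 0 <= off <= alu_limit, and 0 otherwise, so a speculatively
   * out-of-bounds offset cannot move the pointer.
   */
  uint64_t mask_offset(uint64_t off, uint32_t alu_limit)
  {
      uint64_t ax = alu_limit;

      ax -= off;                           /* wraps "negative" if off > limit */
      ax |= off;                           /* also negative if off itself is  */
      ax = -ax;
      ax = (uint64_t)((int64_t)ax >> 63);  /* BPF_ARSH: all-ones or all-zero  */
      return ax & off;
  }

With a known constant there is nothing to clamp (the rewrite loads the immediate directly), hence no truncation and no extra speculative path to simulate.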
+ +Signed-off-by: Daniel Borkmann +Reviewed-by: Piotr Krysiuk +Acked-by: Alexei Starovoitov +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2874,8 +2874,12 @@ do_sim: + /* If we're in commit phase, we're done here given we already + * pushed the truncated dst_reg into the speculative verification + * stack. ++ * ++ * Also, when register is a known constant, we rewrite register-based ++ * operation to immediate-based, and thus do not need masking (and as ++ * a consequence, do not need to simulate the zero-truncation either). + */ +- if (commit_window) ++ if (commit_window || off_is_imm) + return 0; + + /* Simulate and find potential out-of-bounds access under diff --git a/queue-4.19/bpf-refactor-and-streamline-bounds-check-into-helper.patch b/queue-4.19/bpf-refactor-and-streamline-bounds-check-into-helper.patch new file mode 100644 index 00000000000..d24edee573c --- /dev/null +++ b/queue-4.19/bpf-refactor-and-streamline-bounds-check-into-helper.patch @@ -0,0 +1,97 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:03 +0300 +Subject: bpf: Refactor and streamline bounds check into helper +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-13-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit 073815b756c51ba9d8384d924c5d1c03ca3d1ae4 upstream. + +Move the bounds check in adjust_ptr_min_max_vals() into a small helper named +sanitize_check_bounds() in order to simplify the former a bit. + +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +[fllinden@amazon.com: backport to 5.4] +Signed-off-by: Frank van der Linden +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 54 ++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 37 insertions(+), 17 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2911,6 +2911,41 @@ static int sanitize_err(struct bpf_verif + return -EACCES; + } + ++static int sanitize_check_bounds(struct bpf_verifier_env *env, ++ const struct bpf_insn *insn, ++ const struct bpf_reg_state *dst_reg) ++{ ++ u32 dst = insn->dst_reg; ++ ++ /* For unprivileged we require that resulting offset must be in bounds ++ * in order to be able to sanitize access later on. ++ */ ++ if (env->allow_ptr_leaks) ++ return 0; ++ ++ switch (dst_reg->type) { ++ case PTR_TO_STACK: ++ if (check_stack_access(env, dst_reg, dst_reg->off + ++ dst_reg->var_off.value, 1)) { ++ verbose(env, "R%d stack pointer arithmetic goes out of range, " ++ "prohibited for !root\n", dst); ++ return -EACCES; ++ } ++ break; ++ case PTR_TO_MAP_VALUE: ++ if (check_map_access(env, dst, dst_reg->off, 1, false)) { ++ verbose(env, "R%d pointer arithmetic of map value goes out of range, " ++ "prohibited for !root\n", dst); ++ return -EACCES; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ + /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. + * Caller should also handle BPF_MOV case separately. 
+ * If we return -EACCES, caller may want to try again treating pointer as a +@@ -3118,23 +3153,8 @@ static int adjust_ptr_min_max_vals(struc + __reg_deduce_bounds(dst_reg); + __reg_bound_offset(dst_reg); + +- /* For unprivileged we require that resulting offset must be in bounds +- * in order to be able to sanitize access later on. +- */ +- if (!env->allow_ptr_leaks) { +- if (dst_reg->type == PTR_TO_MAP_VALUE && +- check_map_access(env, dst, dst_reg->off, 1, false)) { +- verbose(env, "R%d pointer arithmetic of map value goes out of range, " +- "prohibited for !root\n", dst); +- return -EACCES; +- } else if (dst_reg->type == PTR_TO_STACK && +- check_stack_access(env, dst_reg, dst_reg->off + +- dst_reg->var_off.value, 1)) { +- verbose(env, "R%d stack pointer arithmetic goes out of range, " +- "prohibited for !root\n", dst); +- return -EACCES; +- } +- } ++ if (sanitize_check_bounds(env, insn, dst_reg) < 0) ++ return -EACCES; + + return 0; + } diff --git a/queue-4.19/bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch b/queue-4.19/bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch new file mode 100644 index 00000000000..158d028fdd2 --- /dev/null +++ b/queue-4.19/bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch @@ -0,0 +1,77 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:01 +0300 +Subject: bpf: Rework ptr_limit into alu_limit and add common error path +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-11-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit b658bbb844e28f1862867f37e8ca11a8e2aa94a3 upstream. + +Small refactor with no semantic changes in order to consolidate the max +ptr_limit boundary check. + +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +Signed-off-by: Greg Kroah-Hartman +[OP: backport to 4.19] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 21 +++++++++++++-------- + 1 file changed, 13 insertions(+), 8 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2731,12 +2731,12 @@ static struct bpf_insn_aux_data *cur_aux + + static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, + const struct bpf_reg_state *off_reg, +- u32 *ptr_limit, u8 opcode) ++ u32 *alu_limit, u8 opcode) + { + bool off_is_neg = off_reg->smin_value < 0; + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || + (opcode == BPF_SUB && !off_is_neg); +- u32 off, max; ++ u32 off, max = 0, ptr_limit = 0; + + if (!tnum_is_const(off_reg->var_off) && + (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) +@@ -2750,22 +2750,27 @@ static int retrieve_ptr_limit(const stru + max = MAX_BPF_STACK + mask_to_left; + off = ptr_reg->off + ptr_reg->var_off.value; + if (mask_to_left) +- *ptr_limit = MAX_BPF_STACK + off; ++ ptr_limit = MAX_BPF_STACK + off; + else +- *ptr_limit = -off - 1; +- return *ptr_limit >= max ? 
-ERANGE : 0; ++ ptr_limit = -off - 1; ++ break; + case PTR_TO_MAP_VALUE: + max = ptr_reg->map_ptr->value_size; + if (mask_to_left) { +- *ptr_limit = ptr_reg->umax_value + ptr_reg->off; ++ ptr_limit = ptr_reg->umax_value + ptr_reg->off; + } else { + off = ptr_reg->smin_value + ptr_reg->off; +- *ptr_limit = ptr_reg->map_ptr->value_size - off - 1; ++ ptr_limit = ptr_reg->map_ptr->value_size - off - 1; + } +- return *ptr_limit >= max ? -ERANGE : 0; ++ break; + default: + return -EINVAL; + } ++ ++ if (ptr_limit >= max) ++ return -ERANGE; ++ *alu_limit = ptr_limit; ++ return 0; + } + + static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, diff --git a/queue-4.19/bpf-selftests-fix-up-some-test_verifier-cases-for-unprivileged.patch b/queue-4.19/bpf-selftests-fix-up-some-test_verifier-cases-for-unprivileged.patch new file mode 100644 index 00000000000..3cef5bb304b --- /dev/null +++ b/queue-4.19/bpf-selftests-fix-up-some-test_verifier-cases-for-unprivileged.patch @@ -0,0 +1,167 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:53 +0300 +Subject: bpf, selftests: Fix up some test_verifier cases for unprivileged +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-3-ovidiu.panait@windriver.com> + +From: Piotr Krysiuk + +commit 0a13e3537ea67452d549a6a80da3776d6b7dedb3 upstream + +Fix up test_verifier error messages for the case where the original error +message changed, or for the case where pointer alu errors differ between +privileged and unprivileged tests. Also, add alternative tests for keeping +coverage of the original verifier rejection error message (fp alu), and +newly reject map_ptr += rX where rX == 0 given we now forbid alu on these +types for unprivileged. All test_verifier cases pass after the change. The +test case fixups were kept separate to ease backporting of core changes. 
+ +Signed-off-by: Piotr Krysiuk +Co-developed-by: Daniel Borkmann +Signed-off-by: Daniel Borkmann +Acked-by: Alexei Starovoitov +[OP: backport to 4.19, skipping non-existent tests] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/test_verifier.c | 42 ++++++++++++++++++++++------ + 1 file changed, 33 insertions(+), 9 deletions(-) + +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -2837,7 +2837,7 @@ static struct bpf_test tests[] = { + .result = ACCEPT, + }, + { +- "unpriv: adding of fp", ++ "unpriv: adding of fp, reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_1, 0), +@@ -2845,6 +2845,19 @@ static struct bpf_test tests[] = { + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", ++ .result_unpriv = REJECT, ++ .result = ACCEPT, ++ }, ++ { ++ "unpriv: adding of fp, imm", ++ .insns = { ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), ++ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), ++ BPF_EXIT_INSN(), ++ }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result_unpriv = REJECT, + .result = ACCEPT, +@@ -9758,8 +9771,9 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 2", +@@ -9772,6 +9786,8 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 1, + }, +@@ -9783,8 +9799,9 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 4", +@@ -9797,6 +9814,8 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .result_unpriv = REJECT, + .result = ACCEPT, + }, + { +@@ -9807,8 +9826,9 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 6", +@@ -9819,8 +9839,9 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 7", +@@ -9832,8 +9853,9 @@ static struct bpf_test tests[] = { + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R1 tried to sub from different maps, 
paths, or prohibited types", + .errstr = "dereference of modified ctx ptr", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 8", +@@ -9845,8 +9867,9 @@ static struct bpf_test tests[] = { + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", + .errstr = "dereference of modified ctx ptr", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 9", +@@ -9856,8 +9879,9 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 10", +@@ -9869,8 +9893,8 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, + .errstr = "math between ctx pointer and register with unbounded min value is not allowed", ++ .result = REJECT, + }, + { + "bpf_exit with invalid return code. test1", diff --git a/queue-4.19/bpf-test_verifier-bpf_get_stack-return-value-add-0.patch b/queue-4.19/bpf-test_verifier-bpf_get_stack-return-value-add-0.patch new file mode 100644 index 00000000000..910fa05eb8c --- /dev/null +++ b/queue-4.19/bpf-test_verifier-bpf_get_stack-return-value-add-0.patch @@ -0,0 +1,138 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:57 +0300 +Subject: bpf: Test_verifier, bpf_get_stack return value add <0 +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-7-ovidiu.panait@windriver.com> + +From: John Fastabend + +commit 9ac26e9973bac5716a2a542e32f380c84db2b88c upstream. + +With current ALU32 subreg handling and retval refine fix from last +patches we see an expected failure in test_verifier. With verbose +verifier state being printed at each step for clarity we have the +following relavent lines [I omit register states that are not +necessarily useful to see failure cause], + +#101/p bpf_get_stack return R0 within range FAIL +Failed to load prog 'Success'! +[..] 
+14: (85) call bpf_get_stack#67 + R0_w=map_value(id=0,off=0,ks=8,vs=48,imm=0) + R3_w=inv48 +15: + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) +15: (b7) r1 = 0 +16: + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) + R1_w=inv0 +16: (bf) r8 = r0 +17: + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) + R1_w=inv0 + R8_w=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) +17: (67) r8 <<= 32 +18: + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) + R1_w=inv0 + R8_w=inv(id=0,smax_value=9223372032559808512, + umax_value=18446744069414584320, + var_off=(0x0; 0xffffffff00000000), + s32_min_value=0, + s32_max_value=0, + u32_max_value=0, + var32_off=(0x0; 0x0)) +18: (c7) r8 s>>= 32 +19 + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) + R1_w=inv0 + R8_w=inv(id=0,smin_value=-2147483648, + smax_value=2147483647, + var32_off=(0x0; 0xffffffff)) +19: (cd) if r1 s< r8 goto pc+16 + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) + R1_w=inv0 + R8_w=inv(id=0,smin_value=-2147483648, + smax_value=0, + var32_off=(0x0; 0xffffffff)) +20: + R0=inv(id=0,smax_value=48,var32_off=(0x0; 0xffffffff)) + R1_w=inv0 + R8_w=inv(id=0,smin_value=-2147483648, + smax_value=0, + R9=inv48 +20: (1f) r9 -= r8 +21: (bf) r2 = r7 +22: + R2_w=map_value(id=0,off=0,ks=8,vs=48,imm=0) +22: (0f) r2 += r8 +value -2147483648 makes map_value pointer be out of bounds + +After call bpf_get_stack() on line 14 and some moves we have at line 16 +an r8 bound with max_value 48 but an unknown min value. This is to be +expected bpf_get_stack call can only return a max of the input size but +is free to return any negative error in the 32-bit register space. The +C helper is returning an int so will use lower 32-bits. + +Lines 17 and 18 clear the top 32 bits with a left/right shift but use +ARSH so we still have worst case min bound before line 19 of -2147483648. +At this point the signed check 'r1 s< r8' meant to protect the addition +on line 22 where dst reg is a map_value pointer may very well return +true with a large negative number. Then the final line 22 will detect +this as an invalid operation and fail the program. What we want to do +is proceed only if r8 is positive non-error. So change 'r1 s< r8' to +'r1 s> r8' so that we jump if r8 is negative. + +Next we will throw an error because we access past the end of the map +value. The map value size is 48 and sizeof(struct test_val) is 48 so +we walk off the end of the map value on the second call to +get bpf_get_stack(). Fix this by changing sizeof(struct test_val) to +24 by using 'sizeof(struct test_val) / 2'. After this everything passes +as expected. 
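The shift pair and the flipped comparison can be modeled in plain C (illustration only, not the selftest itself): only the sign-extended low 32 bits of the helper's return value are meaningful, and the map access must run only on the non-negative path.

  #include <stdint.h>

  /* r8 <<= 32; r8 s>>= 32: keep the low 32 bits and sign-extend them, so a
   * negative error code from bpf_get_stack() stays negative in 64 bits.
   */
  int64_t sext32(uint64_t r8)
  {
      return (int64_t)(r8 << 32) >> 32;   /* arithmetic shift, like BPF_ARSH */
  }

  /* The corrected test then branches away on error, e.g.:
   *
   *     if (0 > sext32(ret))   // was "r1 s< r8", now "r1 s> r8"
   *         goto skip;         // do not touch the map value
   */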
+ +Signed-off-by: John Fastabend +Signed-off-by: Alexei Starovoitov +Signed-off-by: Daniel Borkmann +Link: https://lore.kernel.org/bpf/158560426019.10843.3285429543232025187.stgit@john-Precision-5820-Tower +Signed-off-by: Greg Kroah-Hartman +[OP: backport to 4.19] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/test_verifier.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -12253,17 +12253,17 @@ static struct bpf_test tests[] = { + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), +- BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)), ++ BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), +- BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)), ++ BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2), + BPF_MOV64_IMM(BPF_REG_4, 256), + BPF_EMIT_CALL(BPF_FUNC_get_stack), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), +- BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16), ++ BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16), + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), +@@ -12273,7 +12273,7 @@ static struct bpf_test tests[] = { + BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), +- BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)), ++ BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), diff --git a/queue-4.19/bpf-test_verifier-switch-bpf_get_stack-s-0-s-r8-test.patch b/queue-4.19/bpf-test_verifier-switch-bpf_get_stack-s-0-s-r8-test.patch new file mode 100644 index 00000000000..56991f0d03b --- /dev/null +++ b/queue-4.19/bpf-test_verifier-switch-bpf_get_stack-s-0-s-r8-test.patch @@ -0,0 +1,68 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:58 +0300 +Subject: bpf, test_verifier: switch bpf_get_stack's 0 s> r8 test +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-8-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +[ no upstream commit ] + +Switch the comparison, so that is_branch_taken() will recognize that below +branch is never taken: + + [...] + 17: [...] R1_w=inv0 [...] R8_w=inv(id=0,smin_value=-2147483648,smax_value=-1,umin_value=18446744071562067968,var_off=(0xffffffff80000000; 0x7fffffff)) [...] + 17: (67) r8 <<= 32 + 18: [...] R8_w=inv(id=0,smax_value=-4294967296,umin_value=9223372036854775808,umax_value=18446744069414584320,var_off=(0x8000000000000000; 0x7fffffff00000000)) [...] + 18: (c7) r8 s>>= 32 + 19: [...] R8_w=inv(id=0,smin_value=-2147483648,smax_value=-1,umin_value=18446744071562067968,var_off=(0xffffffff80000000; 0x7fffffff)) [...] + 19: (6d) if r1 s> r8 goto pc+16 + [...] R1_w=inv0 [...] R8_w=inv(id=0,smin_value=-2147483648,smax_value=-1,umin_value=18446744071562067968,var_off=(0xffffffff80000000; 0x7fffffff)) [...] + [...] 
+ +Currently we check for is_branch_taken() only if either K is source, or source +is a scalar value that is const. For upstream it would be good to extend this +properly to check whether dst is const and src not. + +For the sake of the test_verifier, it is probably not needed here: + + # ./test_verifier 101 + #101/p bpf_get_stack return R0 within range OK + Summary: 1 PASSED, 0 SKIPPED, 0 FAILED + +I haven't seen this issue in test_progs* though, they are passing fine: + + # ./test_progs-no_alu32 -t get_stack + Switching to flavor 'no_alu32' subdirectory... + #20 get_stack_raw_tp:OK + Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED + + # ./test_progs -t get_stack + #20 get_stack_raw_tp:OK + Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED + +Signed-off-by: Daniel Borkmann +Acked-by: Alexei Starovoitov +Acked-by: John Fastabend +Signed-off-by: Greg Kroah-Hartman +[OP: backport to 4.19] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/test_verifier.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -12263,7 +12263,7 @@ static struct bpf_test tests[] = { + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), +- BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16), ++ BPF_JMP_REG(BPF_JSLT, BPF_REG_8, BPF_REG_1, 16), + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), diff --git a/queue-4.19/bpf-tighten-speculative-pointer-arithmetic-mask.patch b/queue-4.19/bpf-tighten-speculative-pointer-arithmetic-mask.patch new file mode 100644 index 00000000000..7507bb82a64 --- /dev/null +++ b/queue-4.19/bpf-tighten-speculative-pointer-arithmetic-mask.patch @@ -0,0 +1,201 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:05 +0300 +Subject: bpf: Tighten speculative pointer arithmetic mask +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-15-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit 7fedb63a8307dda0ec3b8969a3b233a1dd7ea8e0 upstream. + +This work tightens the offset mask we use for unprivileged pointer arithmetic +in order to mitigate a corner case reported by Piotr and Benedict where in +the speculative domain it is possible to advance, for example, the map value +pointer by up to value_size-1 out-of-bounds in order to leak kernel memory +via side-channel to user space. + +Before this change, the computed ptr_limit for retrieve_ptr_limit() helper +represents largest valid distance when moving pointer to the right or left +which is then fed as aux->alu_limit to generate masking instructions against +the offset register. After the change, the derived aux->alu_limit represents +the largest potential value of the offset register which we mask against which +is just a narrower subset of the former limit. + +For minimal complexity, we call sanitize_ptr_alu() from 2 observation points +in adjust_ptr_min_max_vals(), that is, before and after the simulated alu +operation. 
In the first step, we retieve the alu_state and alu_limit before +the operation as well as we branch-off a verifier path and push it to the +verification stack as we did before which checks the dst_reg under truncation, +in other words, when the speculative domain would attempt to move the pointer +out-of-bounds. + +In the second step, we retrieve the new alu_limit and calculate the absolute +distance between both. Moreover, we commit the alu_state and final alu_limit +via update_alu_sanitation_state() to the env's instruction aux data, and bail +out from there if there is a mismatch due to coming from different verification +paths with different states. + +Reported-by: Piotr Krysiuk +Reported-by: Benedict Schlueter +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +Tested-by: Benedict Schlueter +[fllinden@amazon.com: backported to 5.4] +Signed-off-by: Frank van der Linden +Signed-off-by: Greg Kroah-Hartman +[OP: backport to 4.19] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 70 +++++++++++++++++++++++++++++++------------------- + 1 file changed, 44 insertions(+), 26 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2744,7 +2744,7 @@ static int retrieve_ptr_limit(const stru + bool off_is_neg = off_reg->smin_value < 0; + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || + (opcode == BPF_SUB && !off_is_neg); +- u32 off, max = 0, ptr_limit = 0; ++ u32 max = 0, ptr_limit = 0; + + if (!tnum_is_const(off_reg->var_off) && + (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) +@@ -2753,23 +2753,18 @@ static int retrieve_ptr_limit(const stru + switch (ptr_reg->type) { + case PTR_TO_STACK: + /* Offset 0 is out-of-bounds, but acceptable start for the +- * left direction, see BPF_REG_FP. ++ * left direction, see BPF_REG_FP. Also, unknown scalar ++ * offset where we would need to deal with min/max bounds is ++ * currently prohibited for unprivileged. + */ + max = MAX_BPF_STACK + mask_to_left; +- off = ptr_reg->off + ptr_reg->var_off.value; +- if (mask_to_left) +- ptr_limit = MAX_BPF_STACK + off; +- else +- ptr_limit = -off - 1; ++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); + break; + case PTR_TO_MAP_VALUE: + max = ptr_reg->map_ptr->value_size; +- if (mask_to_left) { +- ptr_limit = ptr_reg->umax_value + ptr_reg->off; +- } else { +- off = ptr_reg->smin_value + ptr_reg->off; +- ptr_limit = ptr_reg->map_ptr->value_size - off - 1; +- } ++ ptr_limit = (mask_to_left ? ++ ptr_reg->smin_value : ++ ptr_reg->umax_value) + ptr_reg->off; + break; + default: + return REASON_TYPE; +@@ -2824,10 +2819,12 @@ static int sanitize_ptr_alu(struct bpf_v + struct bpf_insn *insn, + const struct bpf_reg_state *ptr_reg, + const struct bpf_reg_state *off_reg, +- struct bpf_reg_state *dst_reg) ++ struct bpf_reg_state *dst_reg, ++ struct bpf_insn_aux_data *tmp_aux, ++ const bool commit_window) + { ++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux; + struct bpf_verifier_state *vstate = env->cur_state; +- struct bpf_insn_aux_data *aux = cur_aux(env); + bool off_is_neg = off_reg->smin_value < 0; + bool ptr_is_dst_reg = ptr_reg == dst_reg; + u8 opcode = BPF_OP(insn->code); +@@ -2846,18 +2843,33 @@ static int sanitize_ptr_alu(struct bpf_v + if (vstate->speculative) + goto do_sim; + +- alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; +- alu_state |= ptr_is_dst_reg ? 
+- BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; +- + err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode); + if (err < 0) + return err; + ++ if (commit_window) { ++ /* In commit phase we narrow the masking window based on ++ * the observed pointer move after the simulated operation. ++ */ ++ alu_state = tmp_aux->alu_state; ++ alu_limit = abs(tmp_aux->alu_limit - alu_limit); ++ } else { ++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; ++ alu_state |= ptr_is_dst_reg ? ++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; ++ } ++ + err = update_alu_sanitation_state(aux, alu_state, alu_limit); + if (err < 0) + return err; + do_sim: ++ /* If we're in commit phase, we're done here given we already ++ * pushed the truncated dst_reg into the speculative verification ++ * stack. ++ */ ++ if (commit_window) ++ return 0; ++ + /* Simulate and find potential out-of-bounds access under + * speculative execution from truncation as a result of + * masking when off was not within expected range. If off +@@ -2969,6 +2981,7 @@ static int adjust_ptr_min_max_vals(struc + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; ++ struct bpf_insn_aux_data tmp_aux = {}; + u8 opcode = BPF_OP(insn->code); + u32 dst = insn->dst_reg; + int ret; +@@ -3018,12 +3031,15 @@ static int adjust_ptr_min_max_vals(struc + !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) + return -EINVAL; + +- switch (opcode) { +- case BPF_ADD: +- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg); ++ if (sanitize_needed(opcode)) { ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, ++ &tmp_aux, false); + if (ret < 0) + return sanitize_err(env, insn, ret, off_reg, dst_reg); ++ } + ++ switch (opcode) { ++ case BPF_ADD: + /* We can take a fixed offset as long as it doesn't overflow + * the s32 'off' field + */ +@@ -3074,10 +3090,6 @@ static int adjust_ptr_min_max_vals(struc + } + break; + case BPF_SUB: +- ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg); +- if (ret < 0) +- return sanitize_err(env, insn, ret, off_reg, dst_reg); +- + if (dst_reg == off_reg) { + /* scalar -= pointer. 
Creates an unknown scalar */ + verbose(env, "R%d tried to subtract pointer from scalar\n", +@@ -3160,6 +3172,12 @@ static int adjust_ptr_min_max_vals(struc + + if (sanitize_check_bounds(env, insn, dst_reg) < 0) + return -EACCES; ++ if (sanitize_needed(opcode)) { ++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, ++ &tmp_aux, true); ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, off_reg, dst_reg); ++ } + + return 0; + } diff --git a/queue-4.19/bpf-update-selftests-to-reflect-new-error-states.patch b/queue-4.19/bpf-update-selftests-to-reflect-new-error-states.patch new file mode 100644 index 00000000000..5b6cf929b51 --- /dev/null +++ b/queue-4.19/bpf-update-selftests-to-reflect-new-error-states.patch @@ -0,0 +1,239 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:06 +0300 +Subject: bpf: Update selftests to reflect new error states +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-16-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit d7a5091351756d0ae8e63134313c455624e36a13 upstream + +Update various selftest error messages: + + * The 'Rx tried to sub from different maps, paths, or prohibited types' + is reworked into more specific/differentiated error messages for better + guidance. + + * The change into 'value -4294967168 makes map_value pointer be out of + bounds' is due to moving the mixed bounds check into the speculation + handling and thus occuring slightly later than above mentioned sanity + check. + + * The change into 'math between map_value pointer and register with + unbounded min value' is similarly due to register sanity check coming + before the mixed bounds check. + + * The case of 'map access: known scalar += value_ptr from different maps' + now loads fine given masks are the same from the different paths (despite + max map value size being different). 
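
As a reading aid for the error strings above, here is a simplified user-space sketch, not verifier code: the two range conditions they refer to can be modelled with plain signed min/max bounds. The field and helper names only mirror the smin_value/smax_value tracking visible in the verifier hunks of this series; the real verifier keeps additional state and applies its own limits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct scalar_bounds {
        int64_t smin_value;
        int64_t smax_value;
};

/* Mirrors the (smin < 0) != (smax < 0) test used by retrieve_ptr_limit(). */
static bool has_mixed_signed_bounds(const struct scalar_bounds *r)
{
        return (r->smin_value < 0) != (r->smax_value < 0);
}

/* "unbounded min value": no lower bound was ever learned for the scalar.
 * The threshold here is a placeholder; the kernel applies its own limits.
 */
static bool has_unbounded_min(const struct scalar_bounds *r)
{
        return r->smin_value == INT64_MIN;
}

int main(void)
{
        struct scalar_bounds unknown = { INT64_MIN, INT64_MAX };
        struct scalar_bounds mixed   = { -4096, 4095 };
        struct scalar_bounds bounded = { 0, 48 };

        printf("unknown: mixed=%d unbounded-min=%d\n",
               has_mixed_signed_bounds(&unknown), has_unbounded_min(&unknown));
        printf("mixed:   mixed=%d unbounded-min=%d\n",
               has_mixed_signed_bounds(&mixed), has_unbounded_min(&mixed));
        printf("bounded: mixed=%d unbounded-min=%d\n",
               has_mixed_signed_bounds(&bounded), has_unbounded_min(&bounded));
        return 0;
}

Both helpers are deliberately coarse; they only name the conditions behind the reworded rejections, not the exact thresholds the kernel checks.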
+ +Signed-off-by: Daniel Borkmann +Reviewed-by: John Fastabend +Acked-by: Alexei Starovoitov +[OP: 4.19 backport, account for split test_verifier and +different / missing tests] +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/test_verifier.c | 35 +++++++++------------------- + 1 file changed, 12 insertions(+), 23 deletions(-) + +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -2873,7 +2873,7 @@ static struct bpf_test tests[] = { + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result_unpriv = REJECT, + .result = ACCEPT, + }, +@@ -7501,7 +7501,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7526,7 +7525,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7553,7 +7551,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7579,7 +7576,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7628,7 +7624,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7700,7 +7695,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7752,7 +7746,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7780,7 +7773,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7807,7 +7799,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7837,7 +7828,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7868,7 +7858,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 4 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + }, + { +@@ -7897,7 +7886,6 @@ static struct bpf_test tests[] = { + }, + .fixup_map1 = { 3 }, + .errstr = "unbounded min value", +- .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", + .result = REJECT, + .result_unpriv = REJECT, + }, +@@ -9799,7 +9787,7 @@ static struct bpf_test tests[] = { + 
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, + }, +@@ -9814,7 +9802,7 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 1, +@@ -9827,22 +9815,23 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, + }, + { + "check deducing bounds from const, 4", + .insns = { ++ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), +- BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R6 has pointer with unsupported alu operation", + .result_unpriv = REJECT, + .result = ACCEPT, + }, +@@ -9854,7 +9843,7 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, + }, +@@ -9867,7 +9856,7 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, + }, +@@ -9881,7 +9870,7 @@ static struct bpf_test tests[] = { + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "dereference of modified ctx ptr", + .result = REJECT, + }, +@@ -9895,7 +9884,7 @@ static struct bpf_test tests[] = { + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "dereference of modified ctx ptr", + .result = REJECT, + }, +@@ -9907,7 +9896,7 @@ static struct bpf_test tests[] = { + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", ++ .errstr_unpriv = "R1 has pointer with unsupported alu operation", + .errstr = "R0 tried to subtract pointer from scalar", + .result = REJECT, + }, diff --git a/queue-4.19/bpf-wrap-aux-data-inside-bpf_sanitize_info-container.patch b/queue-4.19/bpf-wrap-aux-data-inside-bpf_sanitize_info-container.patch 
new file mode 100644 index 00000000000..2b7c1c88b01 --- /dev/null +++ b/queue-4.19/bpf-wrap-aux-data-inside-bpf_sanitize_info-container.patch @@ -0,0 +1,88 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:38:08 +0300 +Subject: bpf: Wrap aux data inside bpf_sanitize_info container +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-18-ovidiu.panait@windriver.com> + +From: Daniel Borkmann + +commit 3d0220f6861d713213b015b582e9f21e5b28d2e0 upstream + +Add a container structure struct bpf_sanitize_info which holds +the current aux info, and update call-sites to sanitize_ptr_alu() +to pass it in. This is needed for passing in additional state +later on. + +Signed-off-by: Daniel Borkmann +Reviewed-by: Piotr Krysiuk +Acked-by: Alexei Starovoitov +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + kernel/bpf/verifier.c | 18 +++++++++++------- + 1 file changed, 11 insertions(+), 7 deletions(-) + +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -2815,15 +2815,19 @@ static bool sanitize_needed(u8 opcode) + return opcode == BPF_ADD || opcode == BPF_SUB; + } + ++struct bpf_sanitize_info { ++ struct bpf_insn_aux_data aux; ++}; ++ + static int sanitize_ptr_alu(struct bpf_verifier_env *env, + struct bpf_insn *insn, + const struct bpf_reg_state *ptr_reg, + const struct bpf_reg_state *off_reg, + struct bpf_reg_state *dst_reg, +- struct bpf_insn_aux_data *tmp_aux, ++ struct bpf_sanitize_info *info, + const bool commit_window) + { +- struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux; ++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; + struct bpf_verifier_state *vstate = env->cur_state; + bool off_is_imm = tnum_is_const(off_reg->var_off); + bool off_is_neg = off_reg->smin_value < 0; +@@ -2852,8 +2856,8 @@ static int sanitize_ptr_alu(struct bpf_v + /* In commit phase we narrow the masking window based on + * the observed pointer move after the simulated operation. + */ +- alu_state = tmp_aux->alu_state; +- alu_limit = abs(tmp_aux->alu_limit - alu_limit); ++ alu_state = info->aux.alu_state; ++ alu_limit = abs(info->aux.alu_limit - alu_limit); + } else { + alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; + alu_state |= off_is_imm ? 
BPF_ALU_IMMEDIATE : 0; +@@ -2983,7 +2987,7 @@ static int adjust_ptr_min_max_vals(struc + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; +- struct bpf_insn_aux_data tmp_aux = {}; ++ struct bpf_sanitize_info info = {}; + u8 opcode = BPF_OP(insn->code); + u32 dst = insn->dst_reg; + int ret; +@@ -3035,7 +3039,7 @@ static int adjust_ptr_min_max_vals(struc + + if (sanitize_needed(opcode)) { + ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, +- &tmp_aux, false); ++ &info, false); + if (ret < 0) + return sanitize_err(env, insn, ret, off_reg, dst_reg); + } +@@ -3176,7 +3180,7 @@ static int adjust_ptr_min_max_vals(struc + return -EACCES; + if (sanitize_needed(opcode)) { + ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, +- &tmp_aux, true); ++ &info, true); + if (ret < 0) + return sanitize_err(env, insn, ret, off_reg, dst_reg); + } diff --git a/queue-4.19/net-usb-fix-memory-leak-in-smsc75xx_bind.patch b/queue-4.19/net-usb-fix-memory-leak-in-smsc75xx_bind.patch new file mode 100644 index 00000000000..977c70f886e --- /dev/null +++ b/queue-4.19/net-usb-fix-memory-leak-in-smsc75xx_bind.patch @@ -0,0 +1,60 @@ +From 46a8b29c6306d8bbfd92b614ef65a47c900d8e70 Mon Sep 17 00:00:00 2001 +From: Pavel Skripkin +Date: Mon, 24 May 2021 23:02:08 +0300 +Subject: net: usb: fix memory leak in smsc75xx_bind + +From: Pavel Skripkin + +commit 46a8b29c6306d8bbfd92b614ef65a47c900d8e70 upstream. + +Syzbot reported memory leak in smsc75xx_bind(). +The problem was is non-freed memory in case of +errors after memory allocation. + +backtrace: + [] kmalloc include/linux/slab.h:556 [inline] + [] kzalloc include/linux/slab.h:686 [inline] + [] smsc75xx_bind+0x7a/0x334 drivers/net/usb/smsc75xx.c:1460 + [] usbnet_probe+0x3b6/0xc30 drivers/net/usb/usbnet.c:1728 + +Fixes: d0cad871703b ("smsc75xx: SMSC LAN75xx USB gigabit ethernet adapter driver") +Cc: stable@kernel.vger.org +Reported-and-tested-by: syzbot+b558506ba8165425fee2@syzkaller.appspotmail.com +Signed-off-by: Pavel Skripkin +Signed-off-by: David S. 
Miller +Signed-off-by: Greg Kroah-Hartman +--- + drivers/net/usb/smsc75xx.c | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +--- a/drivers/net/usb/smsc75xx.c ++++ b/drivers/net/usb/smsc75xx.c +@@ -1495,7 +1495,7 @@ static int smsc75xx_bind(struct usbnet * + ret = smsc75xx_wait_ready(dev, 0); + if (ret < 0) { + netdev_warn(dev->net, "device not ready in smsc75xx_bind\n"); +- return ret; ++ goto err; + } + + smsc75xx_init_mac_address(dev); +@@ -1504,7 +1504,7 @@ static int smsc75xx_bind(struct usbnet * + ret = smsc75xx_reset(dev); + if (ret < 0) { + netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret); +- return ret; ++ goto err; + } + + dev->net->netdev_ops = &smsc75xx_netdev_ops; +@@ -1514,6 +1514,10 @@ static int smsc75xx_bind(struct usbnet * + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; + dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE; + return 0; ++ ++err: ++ kfree(pdata); ++ return ret; + } + + static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf) diff --git a/queue-4.19/selftests-bpf-add-selftest-part-of-bpf-improve-verifier-branch-analysis.patch b/queue-4.19/selftests-bpf-add-selftest-part-of-bpf-improve-verifier-branch-analysis.patch new file mode 100644 index 00000000000..881f8c68d23 --- /dev/null +++ b/queue-4.19/selftests-bpf-add-selftest-part-of-bpf-improve-verifier-branch-analysis.patch @@ -0,0 +1,56 @@ +From foo@baz Sun May 30 02:20:56 PM CEST 2021 +From: Ovidiu Panait +Date: Fri, 28 May 2021 13:37:55 +0300 +Subject: selftests/bpf: add selftest part of "bpf: improve verifier branch analysis" +To: stable@vger.kernel.org +Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com +Message-ID: <20210528103810.22025-5-ovidiu.panait@windriver.com> + +From: Ovidiu Panait + +Backport the missing selftest part of commit 7da6cd690c43 ("bpf: improve +verifier branch analysis") in order to fix the following test_verifier +failures: + +... +Unexpected success to load! +0: (b7) r0 = 0 +1: (75) if r0 s>= 0x0 goto pc+1 +3: (95) exit +processed 3 insns (limit 131072), stack depth 0 +Unexpected success to load! +0: (b7) r0 = 0 +1: (75) if r0 s>= 0x0 goto pc+1 +3: (95) exit +processed 3 insns (limit 131072), stack depth 0 +... + +The changesets apply with a minor context difference. 
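
To make the 'Unexpected success to load' output above easier to interpret, here is a hedged user-space sketch of the branch-analysis idea this selftest exercises: once a register holds a known constant, the outcome of a conditional jump can be decided at verification time. The opcode names match BPF's, but the function is a simplification, not the kernel's is_branch_taken().

#include <stdint.h>
#include <stdio.h>

enum jmp_op { JSGE, JSGT, JSLT, JSLE };

/* 1 = always taken, 0 = never taken, for a dst register with a known
 * constant value; -1 would mean "cannot be decided here".
 */
static int branch_taken(enum jmp_op op, int64_t reg, int64_t imm)
{
        switch (op) {
        case JSGE: return reg >= imm;
        case JSGT: return reg >  imm;
        case JSLT: return reg <  imm;
        case JSLE: return reg <= imm;
        }
        return -1;
}

int main(void)
{
        /* Mirrors the log above: "r0 = 0; if r0 s>= 0x0 goto pc+1". */
        printf("r0=0, 'if r0 s>= 0': taken=%d (fall-through is dead)\n",
               branch_taken(JSGE, 0, 0));
        printf("r0=0, 'if r0 s>= 1': taken=%d (fall-through is verified)\n",
               branch_taken(JSGE, 0, 1));
        return 0;
}

That is also why the hunk further below bumps the JSGE immediate from 0 to 1 in 'check deducing bounds from const, 5': with r0 known to be 0, 's>= 0' is always taken, so the fall-through subtraction the test wants rejected would never be analyzed.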
+
+Fixes: 7da6cd690c43 ("bpf: improve verifier branch analysis")
+Signed-off-by: Ovidiu Panait
+Signed-off-by: Greg Kroah-Hartman
+---
+ tools/testing/selftests/bpf/test_verifier.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -7867,7 +7867,7 @@ static struct bpf_test tests[] = {
+ BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+ },
+ .fixup_map1 = { 4 },
+- .errstr = "R0 invalid mem access 'inv'",
++ .errstr = "unbounded min value",
+ .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
+ .result = REJECT,
+ },
+@@ -9850,7 +9850,7 @@ static struct bpf_test tests[] = {
+ "check deducing bounds from const, 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
++ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
+ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
diff --git a/queue-4.19/selftests-bpf-test-narrow-loads-with-off-0-in-test_verifier.patch b/queue-4.19/selftests-bpf-test-narrow-loads-with-off-0-in-test_verifier.patch
new file mode 100644
index 00000000000..ea600810abf
--- /dev/null
+++ b/queue-4.19/selftests-bpf-test-narrow-loads-with-off-0-in-test_verifier.patch
@@ -0,0 +1,123 @@
+From foo@baz Sun May 30 02:20:56 PM CEST 2021
+From: Ovidiu Panait
+Date: Fri, 28 May 2021 13:37:54 +0300
+Subject: selftests/bpf: Test narrow loads with off > 0 in test_verifier
+To: stable@vger.kernel.org
+Cc: fllinden@amazon.com, bpf@vger.kernel.org, ast@kernel.org, daniel@iogearbox.net, yhs@fb.com, john.fastabend@gmail.com, samjonas@amazon.com
+Message-ID: <20210528103810.22025-4-ovidiu.panait@windriver.com>
+
+From: Andrey Ignatov
+
+commit 6c2afb674dbda9b736b8f09c976516e1e788860a upstream
+
+Test the following narrow loads in test_verifier for context __sk_buff:
+* off=1, size=1 - ok;
+* off=2, size=1 - ok;
+* off=3, size=1 - ok;
+* off=0, size=2 - ok;
+* off=1, size=2 - fail;
+* off=2, size=2 - ok;
+* off=3, size=2 - fail.
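
The offset/size list above is easier to picture with a small byte-level model, an illustration only and not how the verifier implements it: a narrow load of 'size' bytes at byte offset 'off' within a 4-byte context field is just a smaller read inside that field's storage, and the 2-byte case is only accepted when the offset is 2-byte aligned. The endianness-dependent offsets in the diff below come from the verifier's own rewrite of such loads into a full-width load plus shift and mask.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns 0 if the narrow load is accepted, -1 otherwise. */
static int narrow_load(uint32_t field, unsigned int off, unsigned int size,
                       uint32_t *out)
{
        unsigned char bytes[4];

        if (off + size > sizeof(field))
                return -1;
        if (size == 2 && (off & 1))
                return -1;              /* unaligned half load: rejected */

        memcpy(bytes, &field, sizeof(bytes));
        *out = 0;
        memcpy(out, bytes + off, size); /* host-endian view, illustration only */
        return 0;
}

int main(void)
{
        uint32_t hash = 0xdeadbeef, v;
        struct { unsigned int off, size; } cases[] = {
                {1, 1}, {2, 1}, {3, 1}, {0, 2}, {1, 2}, {2, 2}, {3, 2},
        };

        for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
                int ret = narrow_load(hash, cases[i].off, cases[i].size, &v);

                printf("off=%u size=%u -> %s\n", cases[i].off, cases[i].size,
                       ret ? "fail" : "ok");
        }
        return 0;
}

Its output reproduces the ok/fail pattern in the list above.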
+ +Signed-off-by: Andrey Ignatov +Signed-off-by: Alexei Starovoitov +Signed-off-by: Ovidiu Panait +Signed-off-by: Greg Kroah-Hartman +--- + tools/testing/selftests/bpf/test_verifier.c | 48 ++++++++++++++++++++++------ + 1 file changed, 38 insertions(+), 10 deletions(-) + +--- a/tools/testing/selftests/bpf/test_verifier.c ++++ b/tools/testing/selftests/bpf/test_verifier.c +@@ -2002,29 +2002,27 @@ static struct bpf_test tests[] = { + .result = ACCEPT, + }, + { +- "check skb->hash byte load not permitted 1", ++ "check skb->hash byte load permitted 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 1), + BPF_EXIT_INSN(), + }, +- .errstr = "invalid bpf_context access", +- .result = REJECT, ++ .result = ACCEPT, + }, + { +- "check skb->hash byte load not permitted 2", ++ "check skb->hash byte load permitted 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, hash) + 2), + BPF_EXIT_INSN(), + }, +- .errstr = "invalid bpf_context access", +- .result = REJECT, ++ .result = ACCEPT, + }, + { +- "check skb->hash byte load not permitted 3", ++ "check skb->hash byte load permitted 3", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + #if __BYTE_ORDER == __LITTLE_ENDIAN +@@ -2036,8 +2034,7 @@ static struct bpf_test tests[] = { + #endif + BPF_EXIT_INSN(), + }, +- .errstr = "invalid bpf_context access", +- .result = REJECT, ++ .result = ACCEPT, + }, + { + "check cb access: byte, wrong type", +@@ -2149,7 +2146,7 @@ static struct bpf_test tests[] = { + .result = ACCEPT, + }, + { +- "check skb->hash half load not permitted", ++ "check skb->hash half load permitted 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + #if __BYTE_ORDER == __LITTLE_ENDIAN +@@ -2161,6 +2158,37 @@ static struct bpf_test tests[] = { + #endif + BPF_EXIT_INSN(), + }, ++ .result = ACCEPT, ++ }, ++ { ++ "check skb->hash half load not permitted, unaligned 1", ++ .insns = { ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++#if __BYTE_ORDER == __LITTLE_ENDIAN ++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, ++ offsetof(struct __sk_buff, hash) + 1), ++#else ++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, ++ offsetof(struct __sk_buff, hash) + 3), ++#endif ++ BPF_EXIT_INSN(), ++ }, ++ .errstr = "invalid bpf_context access", ++ .result = REJECT, ++ }, ++ { ++ "check skb->hash half load not permitted, unaligned 3", ++ .insns = { ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++#if __BYTE_ORDER == __LITTLE_ENDIAN ++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, ++ offsetof(struct __sk_buff, hash) + 3), ++#else ++ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, ++ offsetof(struct __sk_buff, hash) + 1), ++#endif ++ BPF_EXIT_INSN(), ++ }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, diff --git a/queue-4.19/series b/queue-4.19/series index e04fb47b702..f5f5b510fe7 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -36,3 +36,26 @@ usb-serial-ftdi_sio-add-ids-for-ids-gmbh-products.patch usb-serial-pl2303-add-device-id-for-adlink-nd-6530-gc.patch usb-dwc3-gadget-properly-track-pending-and-queued-sg.patch usb-gadget-udc-renesas_usb3-fix-a-race-in-usb3_start_pipen.patch +net-usb-fix-memory-leak-in-smsc75xx_bind.patch +bpf-fix-up-selftests-after-backports-were-fixed.patch +bpf-selftests-fix-up-some-test_verifier-cases-for-unprivileged.patch +selftests-bpf-test-narrow-loads-with-off-0-in-test_verifier.patch +selftests-bpf-add-selftest-part-of-bpf-improve-verifier-branch-analysis.patch +bpf-extend-is_branch_taken-to-registers.patch 
+bpf-test_verifier-bpf_get_stack-return-value-add-0.patch +bpf-test_verifier-switch-bpf_get_stack-s-0-s-r8-test.patch +bpf-move-off_reg-into-sanitize_ptr_alu.patch +bpf-ensure-off_reg-has-no-mixed-signed-bounds-for-all-types.patch +bpf-rework-ptr_limit-into-alu_limit-and-add-common-error-path.patch +bpf-improve-verifier-error-messages-for-users.patch +bpf-refactor-and-streamline-bounds-check-into-helper.patch +bpf-move-sanitize_val_alu-out-of-op-switch.patch +bpf-tighten-speculative-pointer-arithmetic-mask.patch +bpf-update-selftests-to-reflect-new-error-states.patch +bpf-fix-leakage-of-uninitialized-bpf-stack-under-speculation.patch +bpf-wrap-aux-data-inside-bpf_sanitize_info-container.patch +bpf-fix-mask-direction-swap-upon-off-reg-sign-change.patch +bpf-no-need-to-simulate-speculative-domain-for-immediates.patch +spi-gpio-don-t-leak-spi-master-in-probe-error-path.patch +spi-mt7621-disable-clock-in-probe-error-path.patch +spi-mt7621-don-t-leak-spi-master-in-probe-error-path.patch diff --git a/queue-4.19/spi-gpio-don-t-leak-spi-master-in-probe-error-path.patch b/queue-4.19/spi-gpio-don-t-leak-spi-master-in-probe-error-path.patch new file mode 100644 index 00000000000..b3cd7c08b17 --- /dev/null +++ b/queue-4.19/spi-gpio-don-t-leak-spi-master-in-probe-error-path.patch @@ -0,0 +1,78 @@ +From 7174dc655ef0578877b0b4598e69619d2be28b4d Mon Sep 17 00:00:00 2001 +From: Lukas Wunner +Date: Mon, 7 Dec 2020 09:17:09 +0100 +Subject: spi: gpio: Don't leak SPI master in probe error path + +From: Lukas Wunner + +commit 7174dc655ef0578877b0b4598e69619d2be28b4d upstream. + +If the call to devm_spi_register_master() fails on probe of the GPIO SPI +driver, the spi_master struct is erroneously not freed: + +After allocating the spi_master, its reference count is 1. The driver +unconditionally decrements the reference count on unbind using a devm +action. Before calling devm_spi_register_master(), the driver +unconditionally increments the reference count because on success, +that function will decrement the reference count on unbind. However on +failure, devm_spi_register_master() does *not* decrement the reference +count, so the spi_master is leaked. + +The issue was introduced by commits 8b797490b4db ("spi: gpio: Make sure +spi_master_put() is called in every error path") and 79567c1a321e ("spi: +gpio: Use devm_spi_register_master()"), which sought to plug leaks +introduced by 9b00bc7b901f ("spi: spi-gpio: Rewrite to use GPIO +descriptors") but missed this remaining leak. + +The situation was later aggravated by commit d3b0ffa1d75d ("spi: gpio: +prevent memory leak in spi_gpio_probe"), which introduced a +use-after-free because it releases a reference on the spi_master if +devm_add_action_or_reset() fails even though the function already +does that. + +Fix by switching over to the new devm_spi_alloc_master() helper. 
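
The reference-count walk above is easier to follow as numbers, so here is a toy user-space model; it is not kernel code, nothing in it calls the real SPI API, and the names in the comments only refer to the functions mentioned in the commit message.

#include <stdio.h>

static int refcount;

static void get(const char *why) { refcount++; printf("%-24s -> refcount=%d\n", why, refcount); }
static void put(const char *why) { refcount--; printf("%-24s -> refcount=%d\n", why, refcount); }

int main(void)
{
        refcount = 1;                   /* spi_alloc_master(): starts at one reference */
        printf("%-24s -> refcount=%d\n", "alloc", refcount);

        get("before registering");      /* paired with the put that a successful
                                         * registration performs on unbind
                                         */

        /* devm_spi_register_master() fails here and does NOT drop a reference,
         * yet probe simply returns the error.
         */

        put("devm action at unbind");   /* the driver's own unconditional action */

        printf("left over: %d reference(s), the master is never freed\n", refcount);
        return 0;
}

Roughly speaking, devm_spi_alloc_master(), which the fix switches to, ties that final reference to the device's lifetime instead, so probe error paths no longer need a manual put.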
+ +Fixes: 9b00bc7b901f ("spi: spi-gpio: Rewrite to use GPIO descriptors") +Signed-off-by: Lukas Wunner +Reviewed-by: Linus Walleij +Cc: # v4.17+: 5e844cc37a5c: spi: Introduce device-managed SPI controller allocation +Cc: # v5.1-: 8b797490b4db: spi: gpio: Make sure spi_master_put() is called in every error path +Cc: # v5.1-: 45beec351998: spi: bitbang: Introduce spi_bitbang_init() +Cc: # v5.1-: 79567c1a321e: spi: gpio: Use devm_spi_register_master() +Cc: # v5.4-: d3b0ffa1d75d: spi: gpio: prevent memory leak in spi_gpio_probe +Cc: # v4.17+ +Cc: Navid Emamdoost +Cc: Andrey Smirnov +Link: https://lore.kernel.org/r/86eaed27431c3d709e3748eb76ceecbfc790dd37.1607286887.git.lukas@wunner.de +Signed-off-by: Mark Brown +[lukas: backport to v4.19.192] +Signed-off-by: Lukas Wunner +Signed-off-by: Greg Kroah-Hartman +--- + drivers/spi/spi-gpio.c | 8 ++------ + 1 file changed, 2 insertions(+), 6 deletions(-) + +--- a/drivers/spi/spi-gpio.c ++++ b/drivers/spi/spi-gpio.c +@@ -382,7 +382,7 @@ static int spi_gpio_probe(struct platfor + return -ENODEV; + #endif + +- master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*spi_gpio)); + if (!master) + return -ENOMEM; + +@@ -438,11 +438,7 @@ static int spi_gpio_probe(struct platfor + } + spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; + +- status = spi_bitbang_start(&spi_gpio->bitbang); +- if (status) +- spi_master_put(master); +- +- return status; ++ return spi_bitbang_start(&spi_gpio->bitbang); + } + + static int spi_gpio_remove(struct platform_device *pdev) diff --git a/queue-4.19/spi-mt7621-disable-clock-in-probe-error-path.patch b/queue-4.19/spi-mt7621-disable-clock-in-probe-error-path.patch new file mode 100644 index 00000000000..6bab5ae9a22 --- /dev/null +++ b/queue-4.19/spi-mt7621-disable-clock-in-probe-error-path.patch @@ -0,0 +1,48 @@ +From 24f7033405abe195224ec793dbc3d7a27dec0b98 Mon Sep 17 00:00:00 2001 +From: Lukas Wunner +Date: Mon, 7 Dec 2020 09:17:13 +0100 +Subject: spi: mt7621: Disable clock in probe error path + +From: Lukas Wunner + +commit 24f7033405abe195224ec793dbc3d7a27dec0b98 upstream. + +Commit 702b15cb9712 ("spi: mt7621: fix missing clk_disable_unprepare() +on error in mt7621_spi_probe") sought to disable the SYS clock on probe +errors, but only did so for 2 of 3 potentially failing calls: The clock +needs to be disabled on failure of devm_spi_register_controller() as +well. + +Moreover, the commit purports to fix a bug in commit cbd66c626e16 ("spi: +mt7621: Move SPI driver out of staging") but in reality the bug has +existed since the driver was first introduced. 
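
As a generic illustration of the rule this fix enforces, every failure point after the clock has been enabled must also disable it again; the stand-alone sketch below uses placeholder functions, not the MediaTek driver's code or the kernel clock API.

#include <stdio.h>

static int clk_enabled;

static int clk_on(void)         { clk_enabled = 1; return 0; }
static void clk_off(void)       { clk_enabled = 0; }
static int register_ctlr(void)  { return -1; }  /* simulate registration failure */

static int probe(void)
{
        int ret = clk_on();

        if (ret)
                return ret;

        ret = register_ctlr();
        if (ret)
                clk_off();      /* the step the fix adds for this failure path */

        return ret;
}

int main(void)
{
        int ret = probe();

        printf("probe=%d, clock still enabled=%d\n", ret, clk_enabled);
        return 0;
}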
+ +Fixes: 1ab7f2a43558 ("staging: mt7621-spi: add mt7621 support") +Signed-off-by: Lukas Wunner +Cc: # v4.17+: 702b15cb9712: spi: mt7621: fix missing clk_disable_unprepare() on error in mt7621_spi_probe +Cc: # v4.17+ +Cc: Qinglang Miao +Link: https://lore.kernel.org/r/36ad42760087952fb7c10aae7d2628547c26a7ec.1607286887.git.lukas@wunner.de +Signed-off-by: Mark Brown +[lukas: backport to v4.19.192] +Signed-off-by: Lukas Wunner +Signed-off-by: Greg Kroah-Hartman +--- + drivers/staging/mt7621-spi/spi-mt7621.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/staging/mt7621-spi/spi-mt7621.c ++++ b/drivers/staging/mt7621-spi/spi-mt7621.c +@@ -487,7 +487,11 @@ static int mt7621_spi_probe(struct platf + + mt7621_spi_reset(rs, 0); + +- return spi_register_master(master); ++ ret = spi_register_master(master); ++ if (ret) ++ clk_disable_unprepare(clk); ++ ++ return ret; + } + + static int mt7621_spi_remove(struct platform_device *pdev) diff --git a/queue-4.19/spi-mt7621-don-t-leak-spi-master-in-probe-error-path.patch b/queue-4.19/spi-mt7621-don-t-leak-spi-master-in-probe-error-path.patch new file mode 100644 index 00000000000..087c047b4dc --- /dev/null +++ b/queue-4.19/spi-mt7621-don-t-leak-spi-master-in-probe-error-path.patch @@ -0,0 +1,58 @@ +From 46b5c4fb87ce8211e0f9b0383dbde72c3652d2ba Mon Sep 17 00:00:00 2001 +From: Lukas Wunner +Date: Mon, 7 Dec 2020 09:17:14 +0100 +Subject: spi: mt7621: Don't leak SPI master in probe error path + +From: Lukas Wunner + +commit 46b5c4fb87ce8211e0f9b0383dbde72c3652d2ba upstream. + +If the calls to device_reset() or devm_spi_register_controller() fail on +probe of the MediaTek MT7621 SPI driver, the spi_controller struct is +erroneously not freed. Fix by switching over to the new +devm_spi_alloc_master() helper. + +Additionally, there's an ordering issue in mt7621_spi_remove() wherein +the spi_controller is unregistered after disabling the SYS clock. +The correct order is to call spi_unregister_controller() *before* this +teardown step because bus accesses may still be ongoing until that +function returns. + +All of these bugs have existed since the driver was first introduced, +so it seems fair to fix them together in a single commit. + +Fixes: 1ab7f2a43558 ("staging: mt7621-spi: add mt7621 support") +Signed-off-by: Lukas Wunner +Reviewed-by: Stefan Roese +Cc: # v4.17+: 5e844cc37a5c: spi: Introduce device-managed SPI controller allocation +Cc: # v4.17+ +Link: https://lore.kernel.org/r/72b680796149f5fcda0b3f530ffb7ee73b04f224.1607286887.git.lukas@wunner.de +Signed-off-by: Mark Brown +[lukas: backport to v4.19.192] +Signed-off-by: Lukas Wunner +Signed-off-by: Greg Kroah-Hartman +--- + drivers/staging/mt7621-spi/spi-mt7621.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/staging/mt7621-spi/spi-mt7621.c ++++ b/drivers/staging/mt7621-spi/spi-mt7621.c +@@ -452,7 +452,7 @@ static int mt7621_spi_probe(struct platf + if (status) + return status; + +- master = spi_alloc_master(&pdev->dev, sizeof(*rs)); ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs)); + if (master == NULL) { + dev_info(&pdev->dev, "master allocation failed\n"); + clk_disable_unprepare(clk); +@@ -502,8 +502,8 @@ static int mt7621_spi_remove(struct plat + master = dev_get_drvdata(&pdev->dev); + rs = spi_master_get_devdata(master); + +- clk_disable(rs->clk); + spi_unregister_master(master); ++ clk_disable_unprepare(rs->clk); + + return 0; + }