From: Greg Kroah-Hartman
Date: Thu, 8 Sep 2022 11:56:31 +0000 (+0200)
Subject: 4.14-stable patches
X-Git-Tag: v5.19.9~84
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=a209f9da9912340b0555fa46f2aea82bb8c4f889;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	bpf-fix-the-off-by-two-error-in-range-markings.patch
	bpf-verifer-adjust_scalar_min_max_vals-to-always-call-update_reg_bounds.patch
	selftests-bpf-fix-test_align-verifier-log-patterns.patch
---

diff --git a/queue-4.14/bpf-fix-the-off-by-two-error-in-range-markings.patch b/queue-4.14/bpf-fix-the-off-by-two-error-in-range-markings.patch
new file mode 100644
index 00000000000..28fd91abbf3
--- /dev/null
+++ b/queue-4.14/bpf-fix-the-off-by-two-error-in-range-markings.patch
@@ -0,0 +1,109 @@
+From foo@baz Thu Sep 8 01:55:11 PM CEST 2022
+From: Ovidiu Panait
+Date: Tue, 6 Sep 2022 18:38:55 +0300
+Subject: bpf: Fix the off-by-two error in range markings
+To: stable@vger.kernel.org
+Cc: Maxim Mikityanskiy , Daniel Borkmann , Ovidiu Panait
+Message-ID: <20220906153855.2515437-4-ovidiu.panait@windriver.com>
+
+From: Maxim Mikityanskiy
+
+commit 2fa7d94afc1afbb4d702760c058dc2d7ed30f226 upstream.
+
+The first commit cited below attempts to fix the off-by-one error that
+appeared in some comparisons with an open range. Due to this error,
+arithmetically equivalent pieces of code could get different verdicts
+from the verifier, for example (pseudocode):
+
+  // 1. Passes the verifier:
+  if (data + 8 > data_end)
+      return early
+  read *(u64 *)data, i.e. [data; data+7]
+
+  // 2. Rejected by the verifier (should still pass):
+  if (data + 7 >= data_end)
+      return early
+  read *(u64 *)data, i.e. [data; data+7]
+
+The attempted fix, however, shifts the range by one in the wrong
+direction, so not only does the bug remain, but the following piece of
+code starts failing in the verifier:
+
+  // 3. Rejected by the verifier, but the check is stricter than in #1.
+  if (data + 8 >= data_end)
+      return early
+  read *(u64 *)data, i.e. [data; data+7]
+
+The change performed by that fix converted an off-by-one bug into an
+off-by-two one. The second commit cited below added BPF selftests
+written to ensure that code chunks like #3 are rejected; however, they
+should be accepted.
+
+This commit fixes the off-by-two error by adjusting new_range in the
+right direction and fixes the tests by changing the range into the one
+that should actually fail.
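+
+To illustrate why #1 and #2 must get the same verdict, here is a
+minimal userspace C sketch (illustration only, not kernel code; the
+helper names are made up for the example). For any pointers,
+"data + 8 > data_end" and "data + 7 >= data_end" are the same
+predicate, and both guard exactly the 8-byte read [data; data+7]:
+
+  #include <stdbool.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Pattern #1: open-range check, as in "if (data + 8 > data_end)". */
+  static bool oob_open(const uint8_t *data, const uint8_t *data_end)
+  {
+          return data + 8 > data_end;
+  }
+
+  /* Pattern #2: closed-range check, as in "if (data + 7 >= data_end)". */
+  static bool oob_closed(const uint8_t *data, const uint8_t *data_end)
+  {
+          return data + 7 >= data_end;
+  }
+
+  int main(void)
+  {
+          uint8_t buf[32];
+          const uint8_t *end = buf + 24; /* logical end of packet data */
+
+          /* The two predicates agree at every offset, including the
+           * boundary at end - 8, so a correct verifier should give
+           * #1 and #2 the same verdict.
+           */
+          for (size_t off = 0; off <= 24; off++) {
+                  const uint8_t *p = buf + off;
+
+                  if (oob_open(p, end) != oob_closed(p, end)) {
+                          printf("mismatch at offset %zu\n", off);
+                          return 1;
+                  }
+          }
+          printf("checks #1 and #2 guard the same bytes [data; data+7]\n");
+          return 0;
+  }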
+
+Fixes: fb2a311a31d3 ("bpf: fix off by one for range markings with L{T, E} patterns")
+Fixes: b37242c773b2 ("bpf: add test cases to bpf selftests to cover all access tests")
+Signed-off-by: Maxim Mikityanskiy
+Signed-off-by: Daniel Borkmann
+Link: https://lore.kernel.org/bpf/20211130181607.593149-1-maximmi@nvidia.com
+[OP: only cherry-pick selftest changes applicable to 4.14]
+Signed-off-by: Ovidiu Panait
+Signed-off-by: Greg Kroah-Hartman
+---
+ tools/testing/selftests/bpf/test_verifier.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -7438,10 +7438,10 @@ static struct bpf_test tests[] = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ 			BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+@@ -7494,10 +7494,10 @@ static struct bpf_test tests[] = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+-			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+@@ -7603,9 +7603,9 @@ static struct bpf_test tests[] = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ 			BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+-			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
+@@ -7770,9 +7770,9 @@ static struct bpf_test tests[] = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+-			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ 			BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+-			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ 			BPF_MOV64_IMM(BPF_REG_0, 0),
+ 			BPF_EXIT_INSN(),
+ 		},
diff --git a/queue-4.14/bpf-verifer-adjust_scalar_min_max_vals-to-always-call-update_reg_bounds.patch b/queue-4.14/bpf-verifer-adjust_scalar_min_max_vals-to-always-call-update_reg_bounds.patch
new file mode 100644
index 00000000000..a6fd847c4ef
--- /dev/null
+++ b/queue-4.14/bpf-verifer-adjust_scalar_min_max_vals-to-always-call-update_reg_bounds.patch
@@ -0,0 +1,52 @@
+From foo@baz Thu Sep 8 01:55:11 PM CEST 2022
+From: Ovidiu Panait
+Date: Tue, 6 Sep 2022 18:38:53 +0300
+Subject: bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds()
+To: stable@vger.kernel.org
+Cc: John Fastabend , Alexei Starovoitov , Ovidiu Panait
+Message-ID: <20220906153855.2515437-2-ovidiu.panait@windriver.com>
+
+From: John Fastabend
+
+commit 294f2fc6da27620a506e6c050241655459ccd6bd upstream.
+
+Currently, for all op verification we call __reg_deduce_bounds() and
+__reg_bound_offset() but we only call __update_reg_bounds() in bitwise
+ops.
+However, we could benefit from calling __update_reg_bounds() in
+BPF_ADD, BPF_SUB, and BPF_MUL cases as well.
+
+For example, consider a register with state 'R1_w=invP0' when we
+subtract from it:
+
+ w1 -= 2
+
+Before coerce we will now have smin_value=S64_MIN, smax_value=U64_MAX
+and unsigned bounds umin_value=0, umax_value=U64_MAX. These will then
+be clamped to S32_MIN, U32_MAX by coerce_reg_to_size() in the case of
+an alu32 op, as done in the above example. However, tnum will be a
+constant because the ALU op is done on a constant.
+
+Without update_reg_bounds() we have a scenario where tnum is a const
+but our unsigned bounds do not reflect this. By calling
+update_reg_bounds() after coercing to 32 bits, we further refine the
+umin_value to U64_MAX in the alu64 case or U32_MAX in the alu32 case
+above.
+
+Signed-off-by: John Fastabend
+Signed-off-by: Alexei Starovoitov
+Link: https://lore.kernel.org/bpf/158507151689.15666.566796274289413203.stgit@john-Precision-5820-Tower
+Signed-off-by: Ovidiu Panait
+Signed-off-by: Greg Kroah-Hartman
+---
+ kernel/bpf/verifier.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2739,6 +2739,7 @@ static int adjust_scalar_min_max_vals(st
+ 		coerce_reg_to_size(dst_reg, 4);
+ 	}
+
++	__update_reg_bounds(dst_reg);
+ 	__reg_deduce_bounds(dst_reg);
+ 	__reg_bound_offset(dst_reg);
+ 	return 0;
diff --git a/queue-4.14/selftests-bpf-fix-test_align-verifier-log-patterns.patch b/queue-4.14/selftests-bpf-fix-test_align-verifier-log-patterns.patch
new file mode 100644
index 00000000000..d14c70ec59e
--- /dev/null
+++ b/queue-4.14/selftests-bpf-fix-test_align-verifier-log-patterns.patch
@@ -0,0 +1,120 @@
+From foo@baz Thu Sep 8 01:55:11 PM CEST 2022
+From: Ovidiu Panait
+Date: Tue, 6 Sep 2022 18:38:54 +0300
+Subject: selftests/bpf: Fix test_align verifier log patterns
+To: stable@vger.kernel.org
+Cc: Stanislav Fomichev , Daniel Borkmann , Ovidiu Panait
+Message-ID: <20220906153855.2515437-3-ovidiu.panait@windriver.com>
+
+From: Stanislav Fomichev
+
+commit 5366d2269139ba8eb6a906d73a0819947e3e4e0a upstream.
+
+Commit 294f2fc6da27 ("bpf: Verifer, adjust_scalar_min_max_vals to always
+call update_reg_bounds()") changed the way the verifier logs some of its
+state; adjust test_align accordingly. Where possible, I tried not to
+copy-paste the entire log line and resorted to dropping the last closing
+brace instead.
+
+Fixes: 294f2fc6da27 ("bpf: Verifer, adjust_scalar_min_max_vals to always call update_reg_bounds()")
+Signed-off-by: Stanislav Fomichev
+Signed-off-by: Daniel Borkmann
+Link: https://lore.kernel.org/bpf/20200515194904.229296-1-sdf@google.com
+[OP: adjust for 4.14 selftests, apply only the relevant diffs]
+Signed-off-by: Ovidiu Panait
+Signed-off-by: Greg Kroah-Hartman
+---
+ tools/testing/selftests/bpf/test_align.c |   27 ++++++++++++++-------------
+ 1 file changed, 14 insertions(+), 13 deletions(-)
+
+--- a/tools/testing/selftests/bpf/test_align.c
++++ b/tools/testing/selftests/bpf/test_align.c
+@@ -363,15 +363,15 @@ static struct bpf_align_test tests[] = {
+ 			 * is still (4n), fixed offset is not changed.
+ 			 * Also, we create a new reg->id.
+ 			 */
+-			{29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
++			{29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+ 			 * which is 20. Then the variable offset is (4n), so
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
+-			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
++			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
++			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+ 		},
+ 	},
+ 	{
+@@ -414,15 +414,15 @@ static struct bpf_align_test tests[] = {
+ 			/* Adding 14 makes R6 be (4n+2) */
+ 			{9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ 			/* Packet pointer has (4n+2) offset */
+-			{11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
+-			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++			{11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
++			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ 			 * which is 2. Then the variable offset is (4n+2), so
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
++			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ 			/* Newly read value in R6 was shifted left by 2, so has
+ 			 * known alignment of 4.
+ 			 */
+ 			{18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ 			/* Added (4n) to packet pointer's (4n+2) var_off, giving
+ 			 * another (4n+2).
+ 			 */
+-			{19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
+-			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++			{19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
++			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ 			/* At the time the word size load is performed from R5,
+ 			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
+ 			 * which is 2. Then the variable offset is (4n+2), so
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
++			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
+ 		},
+ 	},
+ 	{
+@@ -473,11 +473,11 @@ static struct bpf_align_test tests[] = {
+ 		.matches = {
+ 			{4, "R5=pkt_end(id=0,off=0,imm=0)"},
+ 			/* (ptr - ptr) << 2 == unknown, (4n) */
+-			{6, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
++			{6, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
+ 			/* (4n) + 14 == (4n+2). We blow our bounds, because
+ 			 * the add could overflow.
+ 			 */
+-			{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
++			{7, "R5=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ 			/* Checked s>=0 */
+ 			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+ 			/* packet pointer + nonnegative (4n+2) */
+@@ -532,7 +532,7 @@ static struct bpf_align_test tests[] = {
+ 			/* New unknown value in R7 is (4n) */
+ 			{11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+ 			/* Subtracting it from R6 blows our unsigned bounds */
+-			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
++			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
+ 			/* Checked s>= 0 */
+ 			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
+ 			/* At the time the word size load is performed from R5,
+@@ -541,7 +541,8 @@ static struct bpf_align_test tests[] = {
+ 			 * the total offset is 4-byte aligned and meets the
+ 			 * load's requirements.
+ 			 */
+-			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
++			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
++
+ 		},
+ 	},
+ 	{
diff --git a/queue-4.14/series b/queue-4.14/series
index 06636e5e963..df58b175856 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -1,3 +1,6 @@
+bpf-verifer-adjust_scalar_min_max_vals-to-always-call-update_reg_bounds.patch
+selftests-bpf-fix-test_align-verifier-log-patterns.patch
+bpf-fix-the-off-by-two-error-in-range-markings.patch
 drm-msm-dsi-fix-number-of-regulators-for-msm8996_dsi.patch
 platform-x86-pmc_atom-fix-slp_typx-bitfield-mask.patch
 wifi-cfg80211-debugfs-fix-return-type-in-ht40allow_m.patch
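
A note on the second patch above ("bpf: Verifer, adjust_scalar_min_max_vals
to always call update_reg_bounds()"): the following is a minimal userspace C
sketch of the idea behind __update_reg_bounds(), for illustration only. When
the tnum (the verifier's known-bits tracking) says a value is a constant, the
unsigned range can be tightened to that constant. The struct and helper below
are simplified stand-ins covering only the umin/umax refinement, not the
kernel's definitions.

  #include <stdint.h>
  #include <stdio.h>

  struct reg_bounds {
          uint64_t umin, umax; /* unsigned value range */
          uint64_t tnum_value; /* bits known to be 1 */
          uint64_t tnum_mask;  /* bits whose value is unknown */
  };

  /* Intersect the unsigned range with what the known bits allow;
   * a tnum with mask == 0 is a constant.
   */
  static void update_bounds(struct reg_bounds *r)
  {
          /* smallest value compatible with the known bits */
          if (r->tnum_value > r->umin)
                  r->umin = r->tnum_value;
          /* largest value compatible with the known bits */
          if ((r->tnum_value | r->tnum_mask) < r->umax)
                  r->umax = r->tnum_value | r->tnum_mask;
  }

  int main(void)
  {
          /* "w1 -= 2" on a known zero: the 32-bit result is the
           * constant 0xfffffffe, but plain range arithmetic left the
           * clamped bounds at [0, U32_MAX].
           */
          struct reg_bounds r = {
                  .umin = 0, .umax = UINT32_MAX,
                  .tnum_value = 0xfffffffe, .tnum_mask = 0,
          };

          update_bounds(&r);
          /* both bounds collapse to the constant 0xfffffffe */
          printf("umin=%#llx umax=%#llx\n",
                 (unsigned long long)r.umin, (unsigned long long)r.umax);
          return 0;
  }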