From 6bde6c9eba60f5aafb423626cf8a7729e95f7e05 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 15 Dec 2021 14:35:20 +0100
Subject: [PATCH] 4.14-stable patches

added patches:
	bpf-fix-panic-due-to-oob-in-bpf_prog_test_run_skb.patch
---
 ...-due-to-oob-in-bpf_prog_test_run_skb.patch | 125 ++++++++++++++++++
 queue-4.14/series                             |   1 +
 ...emleak-false-positive-in-tracing_map.patch |  11 +-
 3 files changed, 129 insertions(+), 8 deletions(-)
 create mode 100644 queue-4.14/bpf-fix-panic-due-to-oob-in-bpf_prog_test_run_skb.patch

diff --git a/queue-4.14/bpf-fix-panic-due-to-oob-in-bpf_prog_test_run_skb.patch b/queue-4.14/bpf-fix-panic-due-to-oob-in-bpf_prog_test_run_skb.patch
new file mode 100644
index 00000000000..04dc9f70c91
--- /dev/null
+++ b/queue-4.14/bpf-fix-panic-due-to-oob-in-bpf_prog_test_run_skb.patch
@@ -0,0 +1,125 @@
+From 6e6fddc78323533be570873abb728b7e0ba7e024 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann
+Date: Wed, 11 Jul 2018 15:30:14 +0200
+Subject: bpf: fix panic due to oob in bpf_prog_test_run_skb
+
+From: Daniel Borkmann
+
+commit 6e6fddc78323533be570873abb728b7e0ba7e024 upstream.
+
+sykzaller triggered several panics similar to the below:
+
+  [...]
+  [ 248.851531] BUG: KASAN: use-after-free in _copy_to_user+0x5c/0x90
+  [ 248.857656] Read of size 985 at addr ffff8808017ffff2 by task a.out/1425
+  [...]
+  [ 248.865902] CPU: 1 PID: 1425 Comm: a.out Not tainted 4.18.0-rc4+ #13
+  [ 248.865903] Hardware name: Supermicro SYS-5039MS-H12TRF/X11SSE-F, BIOS 2.1a 03/08/2018
+  [ 248.865905] Call Trace:
+  [ 248.865910]  dump_stack+0xd6/0x185
+  [ 248.865911]  ? show_regs_print_info+0xb/0xb
+  [ 248.865913]  ? printk+0x9c/0xc3
+  [ 248.865915]  ? kmsg_dump_rewind_nolock+0xe4/0xe4
+  [ 248.865919]  print_address_description+0x6f/0x270
+  [ 248.865920]  kasan_report+0x25b/0x380
+  [ 248.865922]  ? _copy_to_user+0x5c/0x90
+  [ 248.865924]  check_memory_region+0x137/0x190
+  [ 248.865925]  kasan_check_read+0x11/0x20
+  [ 248.865927]  _copy_to_user+0x5c/0x90
+  [ 248.865930]  bpf_test_finish.isra.8+0x4f/0xc0
+  [ 248.865932]  bpf_prog_test_run_skb+0x6a0/0xba0
+  [...]
+
+After scrubbing the BPF prog a bit from the noise, turns out it called
+bpf_skb_change_head() for the lwt_xmit prog with headroom of 2. Nothing
+wrong in that, however, this was run with repeat >> 0 in bpf_prog_test_run_skb()
+and the same skb thus keeps changing until the pskb_expand_head() called
+from skb_cow() keeps bailing out in atomic alloc context with -ENOMEM.
+So upon return we'll basically have 0 headroom left yet blindly do the
+__skb_push() of 14 bytes and keep copying data from there in bpf_test_finish()
+out of bounds. Fix to check if we have enough headroom and if pskb_expand_head()
+fails, bail out with error.
+
+Another bug independent of this fix (but related in triggering above) is
+that BPF_PROG_TEST_RUN should be reworked to reset the skb/xdp buffer to
+it's original state from input as otherwise repeating the same test in a
+loop won't work for benchmarking when underlying input buffer is getting
+changed by the prog each time and reused for the next run leading to
+unexpected results.
+
+Fixes: 1cf1cae963c2 ("bpf: introduce BPF_PROG_TEST_RUN command")
+Reported-by: syzbot+709412e651e55ed96498@syzkaller.appspotmail.com
+Reported-by: syzbot+54f39d6ab58f39720a55@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann
+Signed-off-by: Alexei Starovoitov
+[connoro: drop test_verifier.c changes not applicable to 4.14]
+Signed-off-by: Connor O'Brien
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/bpf/test_run.c                          |   17 ++++++++++++++---
+ tools/testing/selftests/bpf/test_verifier.c |   18 ++++++++++++++++++
+ 2 files changed, 32 insertions(+), 3 deletions(-)
+
+--- a/net/bpf/test_run.c
++++ b/net/bpf/test_run.c
+@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_pro
+ 	u32 size = kattr->test.data_size_in;
+ 	u32 repeat = kattr->test.repeat;
+ 	u32 retval, duration;
++	int hh_len = ETH_HLEN;
+ 	struct sk_buff *skb;
+ 	void *data;
+ 	int ret;
+@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_pro
+ 	skb_reset_network_header(skb);
+ 
+ 	if (is_l2)
+-		__skb_push(skb, ETH_HLEN);
++		__skb_push(skb, hh_len);
+ 	if (is_direct_pkt_access)
+ 		bpf_compute_data_end(skb);
+ 	retval = bpf_test_run(prog, skb, repeat, &duration);
+-	if (!is_l2)
+-		__skb_push(skb, ETH_HLEN);
++	if (!is_l2) {
++		if (skb_headroom(skb) < hh_len) {
++			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
++
++			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
++				kfree_skb(skb);
++				return -ENOMEM;
++			}
++		}
++		memset(__skb_push(skb, hh_len), 0, hh_len);
++	}
++
+ 	size = skb->len;
+ 	/* bpf program can never convert linear skb to non-linear */
+ 	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -4335,6 +4335,24 @@ static struct bpf_test tests[] = {
+ 		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
+ 	},
+ 	{
++		"make headroom for LWT_XMIT",
++		.insns = {
++			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
++			BPF_MOV64_IMM(BPF_REG_2, 34),
++			BPF_MOV64_IMM(BPF_REG_3, 0),
++			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
++			/* split for s390 to succeed */
++			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
++			BPF_MOV64_IMM(BPF_REG_2, 42),
++			BPF_MOV64_IMM(BPF_REG_3, 0),
++			BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
++			BPF_MOV64_IMM(BPF_REG_0, 0),
++			BPF_EXIT_INSN(),
++		},
++		.result = ACCEPT,
++		.prog_type = BPF_PROG_TYPE_LWT_XMIT,
++	},
++	{
+ 		"invalid access of tc_classid for LWT_IN",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
diff --git a/queue-4.14/series b/queue-4.14/series
index d9c98de23ef..564a5ca4d8b 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -5,3 +5,4 @@ parisc-agp-annotate-parisc-agp-init-functions-with-_.patch
 i2c-rk3x-handle-a-spurious-start-completion-interrup.patch
 net-netlink-af_netlink-prevent-empty-skb-by-adding-a.patch
 tracing-fix-a-kmemleak-false-positive-in-tracing_map.patch
+bpf-fix-panic-due-to-oob-in-bpf_prog_test_run_skb.patch
diff --git a/queue-4.14/tracing-fix-a-kmemleak-false-positive-in-tracing_map.patch b/queue-4.14/tracing-fix-a-kmemleak-false-positive-in-tracing_map.patch
index 873e8696c2a..0c217829156 100644
--- a/queue-4.14/tracing-fix-a-kmemleak-false-positive-in-tracing_map.patch
+++ b/queue-4.14/tracing-fix-a-kmemleak-false-positive-in-tracing_map.patch
@@ -67,11 +67,9 @@ Signed-off-by: Chen Jun
 Signed-off-by: Steven Rostedt (VMware)
 Signed-off-by: Sasha Levin
 ---
- kernel/trace/tracing_map.c | 3 +++
+ kernel/trace/tracing_map.c |    3 +++
  1 file changed, 3 insertions(+)
 
-diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
-index 379db35838b64..572c0854d631c 100644
 --- a/kernel/trace/tracing_map.c
 +++ b/kernel/trace/tracing_map.c
 @@ -24,6 +24,7 @@
@@ -82,7 +80,7 @@ index 379db35838b64..572c0854d631c 100644
  
  #include "tracing_map.h"
  #include "trace.h"
-@@ -227,6 +228,7 @@ void tracing_map_array_free(struct tracing_map_array *a)
+@@ -227,6 +228,7 @@ void tracing_map_array_free(struct traci
  	for (i = 0; i < a->n_pages; i++) {
  		if (!a->pages[i])
  			break;
@@ -90,7 +88,7 @@ index 379db35838b64..572c0854d631c 100644
  		free_page((unsigned long)a->pages[i]);
  	}
  
-@@ -262,6 +264,7 @@ struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
+@@ -262,6 +264,7 @@ struct tracing_map_array *tracing_map_ar
  		a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
  		if (!a->pages[i])
  			goto free;
@@ -98,6 +96,3 @@ index 379db35838b64..572c0854d631c 100644
  	}
  out:
  	return a;
--- 
-2.33.0
-
-- 
2.47.2
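
The fix in the patch above boils down to one rule: never __skb_push() more bytes than the skb has headroom for; grow the buffer first and bail out if that fails. The short standalone C sketch below illustrates the same pattern in userspace. It is only an illustration, not kernel code: the fake_skb struct and the expand_head()/push() helpers are made-up stand-ins for sk_buff, pskb_expand_head() and __skb_push().

/*
 * Illustrative userspace sketch only -- fake_skb, expand_head() and push()
 * are hypothetical stand-ins for sk_buff, pskb_expand_head() and
 * __skb_push(); they are not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_HLEN 14			/* bytes pushed in front of the payload */

struct fake_skb {
	unsigned char *buf;		/* start of the allocation      */
	unsigned char *data;		/* current start of packet data */
	size_t len;			/* bytes of packet data         */
};

/* Grow the front of the buffer so at least "need" bytes of headroom exist. */
static int expand_head(struct fake_skb *skb, size_t need)
{
	size_t headroom = (size_t)(skb->data - skb->buf);
	unsigned char *nbuf;

	if (headroom >= need)
		return 0;
	nbuf = malloc(need + skb->len);
	if (!nbuf)
		return -1;		/* mirrors pskb_expand_head() failing */
	memcpy(nbuf + need, skb->data, skb->len);
	free(skb->buf);
	skb->buf = nbuf;
	skb->data = nbuf + need;
	return 0;
}

/* Safe push: make room first, only then move the data pointer back. */
static unsigned char *push(struct fake_skb *skb, size_t n)
{
	if (expand_head(skb, n))
		return NULL;		/* bail out instead of writing out of bounds */
	skb->data -= n;
	skb->len += n;
	return skb->data;
}

int main(void)
{
	struct fake_skb skb;
	unsigned char *hdr;

	skb.len = 64;
	skb.buf = malloc(skb.len);
	if (!skb.buf)
		return 1;
	skb.data = skb.buf;		/* zero headroom: the failing case */
	memset(skb.data, 0xab, skb.len);

	hdr = push(&skb, ETH_HLEN);	/* would be out of bounds without expand_head() */
	if (!hdr) {
		free(skb.buf);
		return 1;
	}
	memset(hdr, 0, ETH_HLEN);	/* mirrors the memset() added by the fix */
	printf("pushed %d header bytes, len is now %zu\n", ETH_HLEN, skb.len);
	free(skb.buf);
	return 0;
}

As in the patch, push() refuses to move the data pointer when the buffer cannot be grown, which is exactly the check whose absence let bpf_test_finish() copy data out of bounds.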