From: Greg Kroah-Hartman
Date: Fri, 10 Jan 2020 07:59:10 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v4.4.209~20
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0daa06f52ffd7469b445b5a7463652c8b39dcab2;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	bpf-fix-passing-modified-ctx-to-ld-abs-ind-instruction.patch
	bpf-reject-passing-modified-ctx-to-helper-functions.patch
---

diff --git a/queue-4.14/bpf-fix-passing-modified-ctx-to-ld-abs-ind-instruction.patch b/queue-4.14/bpf-fix-passing-modified-ctx-to-ld-abs-ind-instruction.patch
new file mode 100644
index 00000000000..06a22026cbd
--- /dev/null
+++ b/queue-4.14/bpf-fix-passing-modified-ctx-to-ld-abs-ind-instruction.patch
@@ -0,0 +1,117 @@
+From 6d4f151acf9a4f6fab09b615f246c717ddedcf0c Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann
+Date: Mon, 6 Jan 2020 22:51:57 +0100
+Subject: bpf: Fix passing modified ctx to ld/abs/ind instruction
+
+From: Daniel Borkmann
+
+commit 6d4f151acf9a4f6fab09b615f246c717ddedcf0c upstream.
+
+Anatoly has been fuzzing with kBdysch harness and reported a KASAN
+slab oob in one of the outcomes:
+
+ [...]
+ [ 77.359642] BUG: KASAN: slab-out-of-bounds in bpf_skb_load_helper_8_no_cache+0x71/0x130
+ [ 77.360463] Read of size 4 at addr ffff8880679bac68 by task bpf/406
+ [ 77.361119]
+ [ 77.361289] CPU: 2 PID: 406 Comm: bpf Not tainted 5.5.0-rc2-xfstests-00157-g2187f215eba #1
+ [ 77.362134] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
+ [ 77.362984] Call Trace:
+ [ 77.363249]  dump_stack+0x97/0xe0
+ [ 77.363603]  print_address_description.constprop.0+0x1d/0x220
+ [ 77.364251]  ? bpf_skb_load_helper_8_no_cache+0x71/0x130
+ [ 77.365030]  ? bpf_skb_load_helper_8_no_cache+0x71/0x130
+ [ 77.365860]  __kasan_report.cold+0x37/0x7b
+ [ 77.366365]  ? bpf_skb_load_helper_8_no_cache+0x71/0x130
+ [ 77.366940]  kasan_report+0xe/0x20
+ [ 77.367295]  bpf_skb_load_helper_8_no_cache+0x71/0x130
+ [ 77.367821]  ? bpf_skb_load_helper_8+0xf0/0xf0
+ [ 77.368278]  ? mark_lock+0xa3/0x9b0
+ [ 77.368641]  ? kvm_sched_clock_read+0x14/0x30
+ [ 77.369096]  ? sched_clock+0x5/0x10
+ [ 77.369460]  ? sched_clock_cpu+0x18/0x110
+ [ 77.369876]  ? bpf_skb_load_helper_8+0xf0/0xf0
+ [ 77.370330]  ___bpf_prog_run+0x16c0/0x28f0
+ [ 77.370755]  __bpf_prog_run32+0x83/0xc0
+ [ 77.371153]  ? __bpf_prog_run64+0xc0/0xc0
+ [ 77.371568]  ? match_held_lock+0x1b/0x230
+ [ 77.371984]  ? rcu_read_lock_held+0xa1/0xb0
+ [ 77.372416]  ? rcu_is_watching+0x34/0x50
+ [ 77.372826]  sk_filter_trim_cap+0x17c/0x4d0
+ [ 77.373259]  ? sock_kzfree_s+0x40/0x40
+ [ 77.373648]  ? __get_filter+0x150/0x150
+ [ 77.374059]  ? skb_copy_datagram_from_iter+0x80/0x280
+ [ 77.374581]  ? do_raw_spin_unlock+0xa5/0x140
+ [ 77.375025]  unix_dgram_sendmsg+0x33a/0xa70
+ [ 77.375459]  ? do_raw_spin_lock+0x1d0/0x1d0
+ [ 77.375893]  ? unix_peer_get+0xa0/0xa0
+ [ 77.376287]  ? __fget_light+0xa4/0xf0
+ [ 77.376670]  __sys_sendto+0x265/0x280
+ [ 77.377056]  ? __ia32_sys_getpeername+0x50/0x50
+ [ 77.377523]  ? lock_downgrade+0x350/0x350
+ [ 77.377940]  ? __sys_setsockopt+0x2a6/0x2c0
+ [ 77.378374]  ? sock_read_iter+0x240/0x240
+ [ 77.378789]  ? __sys_socketpair+0x22a/0x300
+ [ 77.379221]  ? __ia32_sys_socket+0x50/0x50
+ [ 77.379649]  ? mark_held_locks+0x1d/0x90
+ [ 77.380059]  ? trace_hardirqs_on_thunk+0x1a/0x1c
+ [ 77.380536]  __x64_sys_sendto+0x74/0x90
+ [ 77.380938]  do_syscall_64+0x68/0x2a0
+ [ 77.381324]  entry_SYSCALL_64_after_hwframe+0x49/0xbe
+ [ 77.381878] RIP: 0033:0x44c070
+ [...]
+
+After further debugging, turns out while in case of other helper functions
+we disallow passing modified ctx, the special case of ld/abs/ind instruction
+which has similar semantics (except r6 being the ctx argument) is missing
+such check. Modified ctx is impossible here as bpf_skb_load_helper_8_no_cache()
+and others are expecting skb fields in original position, hence, add
+check_ctx_reg() to reject any modified ctx. Issue was first introduced back
+in f1174f77b50c ("bpf/verifier: rework value tracking").
+
+Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
+Reported-by: Anatoly Trosinenko
+Signed-off-by: Daniel Borkmann
+Signed-off-by: Alexei Starovoitov
+Link: https://lore.kernel.org/bpf/20200106215157.3553-1-daniel@iogearbox.net
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/bpf/verifier.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -3457,6 +3457,7 @@ static bool may_access_skb(enum bpf_prog
+ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
+ {
+ 	struct bpf_reg_state *regs = cur_regs(env);
++	static const int ctx_reg = BPF_REG_6;
+ 	u8 mode = BPF_MODE(insn->code);
+ 	int i, err;
+
+@@ -3473,11 +3474,11 @@ static int check_ld_abs(struct bpf_verif
+ 	}
+
+ 	/* check whether implicit source operand (register R6) is readable */
+-	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
++	err = check_reg_arg(env, ctx_reg, SRC_OP);
+ 	if (err)
+ 		return err;
+
+-	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
++	if (regs[ctx_reg].type != PTR_TO_CTX) {
+ 		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
+ 		return -EINVAL;
+ 	}
+@@ -3489,6 +3490,10 @@ static int check_ld_abs(struct bpf_verif
+ 		return err;
+ 	}
+
++	err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
++	if (err < 0)
++		return err;
++
+ 	/* reset caller saved regs to unreadable */
+ 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
+ 		mark_reg_not_init(regs, caller_saved[i]);
diff --git a/queue-4.14/bpf-reject-passing-modified-ctx-to-helper-functions.patch b/queue-4.14/bpf-reject-passing-modified-ctx-to-helper-functions.patch
new file mode 100644
index 00000000000..2861e0d2ca1
--- /dev/null
+++ b/queue-4.14/bpf-reject-passing-modified-ctx-to-helper-functions.patch
@@ -0,0 +1,202 @@
+From 58990d1ff3f7896ee341030e9a7c2e4002570683 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann
+Date: Thu, 7 Jun 2018 17:40:03 +0200
+Subject: bpf: reject passing modified ctx to helper functions
+
+From: Daniel Borkmann
+
+commit 58990d1ff3f7896ee341030e9a7c2e4002570683 upstream.
+
+As commit 28e33f9d78ee ("bpf: disallow arithmetic operations on
+context pointer") already describes, f1174f77b50c ("bpf/verifier:
+rework value tracking") removed the specific white-listed cases
+we had previously where we would allow for pointer arithmetic in
+order to further generalize it, and allow e.g. context access via
+modified registers. While the dereferencing of modified context
+pointers had been forbidden through 28e33f9d78ee, syzkaller did
+recently manage to trigger several KASAN splats for slab out of
+bounds access and use after frees by simply passing a modified
+context pointer to a helper function which would then do the bad
+access since verifier allowed it in adjust_ptr_min_max_vals().
+
+Rejecting arithmetic on ctx pointer in adjust_ptr_min_max_vals()
+generally could break existing programs as there's a valid use
+case in tracing in combination with passing the ctx to helpers as
+bpf_probe_read(), where the register then becomes unknown at
+verification time due to adding a non-constant offset to it. An
+access sequence may look like the following:
+
+  offset = args->filename;  /* field __data_loc filename */
+  bpf_probe_read(&dst, len, (char *)args + offset); // args is ctx
+
+There are two options: i) we could special case the ctx and as
+soon as we add a constant or bounded offset to it (hence ctx type
+wouldn't change) we could turn the ctx into an unknown scalar, or
+ii) we generalize the sanity test for ctx member access into a
+small helper and assert it on the ctx register that was passed
+as a function argument. Fwiw, latter is more obvious and less
+complex at the same time, and one case that may potentially be
+legitimate in future for ctx member access at least would be for
+ctx to carry a const offset. Therefore, fix follows approach
+from ii) and adds test cases to BPF kselftests.
+
+Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
+Reported-by: syzbot+3d0b2441dbb71751615e@syzkaller.appspotmail.com
+Reported-by: syzbot+c8504affd4fdd0c1b626@syzkaller.appspotmail.com
+Reported-by: syzbot+e5190cb881d8660fb1a3@syzkaller.appspotmail.com
+Reported-by: syzbot+efae31b384d5badbd620@syzkaller.appspotmail.com
+Signed-off-by: Daniel Borkmann
+Acked-by: Alexei Starovoitov
+Acked-by: Yonghong Song
+Acked-by: Edward Cree
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/bpf/verifier.c | 45 ++++++++++++++-------
+ tools/testing/selftests/bpf/test_verifier.c | 58 +++++++++++++++++++++++++++-
+ 2 files changed, 87 insertions(+), 16 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1251,6 +1251,30 @@ static int check_ptr_alignment(struct bp
+ 	return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict);
+ }
+
++static int check_ctx_reg(struct bpf_verifier_env *env,
++			 const struct bpf_reg_state *reg, int regno)
++{
++	/* Access to ctx or passing it to a helper is only allowed in
++	 * its original, unmodified form.
++	 */
++
++	if (reg->off) {
++		verbose("dereference of modified ctx ptr R%d off=%d disallowed\n",
++			regno, reg->off);
++		return -EACCES;
++	}
++
++	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
++		char tn_buf[48];
++
++		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
++		verbose("variable ctx access var_off=%s disallowed\n", tn_buf);
++		return -EACCES;
++	}
++
++	return 0;
++}
++
+ /* truncate register to smaller size (in bytes)
+  * must be called with size < BPF_REG_SIZE
+  */
+@@ -1320,22 +1344,10 @@ static int check_mem_access(struct bpf_v
+ 			verbose("R%d leaks addr into ctx\n", value_regno);
+ 			return -EACCES;
+ 		}
+-		/* ctx accesses must be at a fixed offset, so that we can
+-		 * determine what type of data were returned.
+-		 */
+-		if (reg->off) {
+-			verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
+-				regno, reg->off, off - reg->off);
+-			return -EACCES;
+-		}
+-		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
+-			char tn_buf[48];
++		err = check_ctx_reg(env, reg, regno);
++		if (err < 0)
++			return err;
+
+-			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+-			verbose("variable ctx access var_off=%s off=%d size=%d",
+-				tn_buf, off, size);
+-			return -EACCES;
+-		}
+ 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
+ 		if (!err && t == BPF_READ && value_regno >= 0) {
+ 			/* ctx access returns either a scalar, or a
+@@ -1573,6 +1585,9 @@ static int check_func_arg(struct bpf_ver
+ 		expected_type = PTR_TO_CTX;
+ 		if (type != expected_type)
+ 			goto err_type;
++		err = check_ctx_reg(env, reg, regno);
++		if (err < 0)
++			return err;
+ 	} else if (arg_type == ARG_PTR_TO_MEM ||
+ 		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
+ 		expected_type = PTR_TO_STACK;
+--- a/tools/testing/selftests/bpf/test_verifier.c
++++ b/tools/testing/selftests/bpf/test_verifier.c
+@@ -7281,7 +7281,7 @@ static struct bpf_test tests[] = {
+ 			    offsetof(struct __sk_buff, mark)),
+ 			BPF_EXIT_INSN(),
+ 		},
+-		.errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
++		.errstr = "dereference of modified ctx ptr",
+ 		.result = REJECT,
+ 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ 	},
+@@ -7944,6 +7944,62 @@ static struct bpf_test tests[] = {
+ 		.errstr = "BPF_XADD stores into R2 packet",
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
++	{
++		"pass unmodified ctx pointer to helper",
++		.insns = {
++			BPF_MOV64_IMM(BPF_REG_2, 0),
++			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
++				     BPF_FUNC_csum_update),
++			BPF_MOV64_IMM(BPF_REG_0, 0),
++			BPF_EXIT_INSN(),
++		},
++		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
++		.result = ACCEPT,
++	},
++	{
++		"pass modified ctx pointer to helper, 1",
++		.insns = {
++			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
++			BPF_MOV64_IMM(BPF_REG_2, 0),
++			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
++				     BPF_FUNC_csum_update),
++			BPF_MOV64_IMM(BPF_REG_0, 0),
++			BPF_EXIT_INSN(),
++		},
++		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
++		.result = REJECT,
++		.errstr = "dereference of modified ctx ptr",
++	},
++	{
++		"pass modified ctx pointer to helper, 2",
++		.insns = {
++			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
++			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
++				     BPF_FUNC_get_socket_cookie),
++			BPF_MOV64_IMM(BPF_REG_0, 0),
++			BPF_EXIT_INSN(),
++		},
++		.result_unpriv = REJECT,
++		.result = REJECT,
++		.errstr_unpriv = "dereference of modified ctx ptr",
++		.errstr = "dereference of modified ctx ptr",
++	},
++	{
++		"pass modified ctx pointer to helper, 3",
++		.insns = {
++			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
++			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
++			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
++			BPF_MOV64_IMM(BPF_REG_2, 0),
++			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
++				     BPF_FUNC_csum_update),
++			BPF_MOV64_IMM(BPF_REG_0, 0),
++			BPF_EXIT_INSN(),
++		},
++		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
++		.result = REJECT,
++		.errstr = "variable ctx access var_off=(0x0; 0x4)",
++	},
+ };
+
+ static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/queue-4.14/series b/queue-4.14/series
index b1ad95b5529..aa776c60ca0 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -37,3 +37,5 @@ block-fix-memleak-when-__blk_rq_map_user_iov-is-fail.patch
 parisc-fix-compiler-warnings-in-debug_core.c.patch
 llc2-fix-return-statement-of-llc_stat_ev_rx_null_dsa.patch
 hv_netvsc-fix-unwanted-rx_table-reset.patch
+bpf-reject-passing-modified-ctx-to-helper-functions.patch
+bpf-fix-passing-modified-ctx-to-ld-abs-ind-instruction.patch
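
Backporting note: the ld/abs/ind fix calls check_ctx_reg(), which is only introduced by
bpf-reject-passing-modified-ctx-to-helper-functions.patch, so the series above queues that
patch first. Only the helper-call case carries selftests in this backport. Purely as an
illustration of what the first patch is expected to reject, a test_verifier-style case for
the ld/abs path might look like the sketch below; this is not part of either queued patch,
the test name is made up, and the errstr assumes the "dereference of modified ctx ptr"
message added by check_ctx_reg():

	{
		"ld_abs: modified ctx pointer in r6 (illustrative only)",
		.insns = {
			/* r6 = ctx (skb), the implicit source of BPF_LD_ABS */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
			/* constant arithmetic keeps PTR_TO_CTX but sets reg->off != 0 ... */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
			/* ... so the ld_abs through the modified r6 should now be rejected */
			BPF_LD_ABS(BPF_B, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "dereference of modified ctx ptr",
	},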