git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/4.14.26/bpf-arm64-fix-out-of-bounds-access-in-tail-call.patch
4.14-stable patches
[thirdparty/kernel/stable-queue.git] / releases / 4.14.26 / bpf-arm64-fix-out-of-bounds-access-in-tail-call.patch
1 From foo@baz Fri Mar 9 14:18:36 PST 2018
2 From: Daniel Borkmann <daniel@iogearbox.net>
3 Date: Thu, 8 Mar 2018 13:14:44 +0100
4 Subject: bpf, arm64: fix out of bounds access in tail call
5 To: gregkh@linuxfoundation.org
6 Cc: ast@kernel.org, daniel@iogearbox.net, stable@vger.kernel.org
7 Message-ID: <8d821dbec5378699f13c7e55f7abfbffd6cdcaa7.1520504748.git.daniel@iogearbox.net>
8
9 From: Daniel Borkmann <daniel@iogearbox.net>
10
11 [ upstream commit 16338a9b3ac30740d49f5dfed81bac0ffa53b9c7 ]
12
13 I recently noticed a crash on arm64 when feeding a bogus index
14 into BPF tail call helper. The crash would not occur when the
15 interpreter is used, but only in case of JIT. Output looks as
16 follows:
17
18 [ 347.007486] Unable to handle kernel paging request at virtual address fffb850e96492510
19 [...]
20 [ 347.043065] [fffb850e96492510] address between user and kernel address ranges
21 [ 347.050205] Internal error: Oops: 96000004 [#1] SMP
22 [...]
23 [ 347.190829] x13: 0000000000000000 x12: 0000000000000000
24 [ 347.196128] x11: fffc047ebe782800 x10: ffff808fd7d0fd10
25 [ 347.201427] x9 : 0000000000000000 x8 : 0000000000000000
26 [ 347.206726] x7 : 0000000000000000 x6 : 001c991738000000
27 [ 347.212025] x5 : 0000000000000018 x4 : 000000000000ba5a
28 [ 347.217325] x3 : 00000000000329c4 x2 : ffff808fd7cf0500
29 [ 347.222625] x1 : ffff808fd7d0fc00 x0 : ffff808fd7cf0500
30 [ 347.227926] Process test_verifier (pid: 4548, stack limit = 0x000000007467fa61)
31 [ 347.235221] Call trace:
32 [ 347.237656] 0xffff000002f3a4fc
33 [ 347.240784] bpf_test_run+0x78/0xf8
34 [ 347.244260] bpf_prog_test_run_skb+0x148/0x230
35 [ 347.248694] SyS_bpf+0x77c/0x1110
36 [ 347.251999] el0_svc_naked+0x30/0x34
37 [ 347.255564] Code: 9100075a d280220a 8b0a002a d37df04b (f86b694b)
38 [...]
39
40 In this case the index used in BPF r3 is the same as in r1
41 at the time of the call, meaning we fed a pointer as index;
42 here, it had the value 0xffff808fd7cf0500 which sits in x2.
43
44 While I found tail calls to be working in general (also for
45 hitting the error cases), I noticed the following in the code
46 emission:
47
48 # bpftool p d j i 988
49 [...]
50 38: ldr w10, [x1,x10]
51 3c: cmp w2, w10
52 40: b.ge 0x000000000000007c <-- signed cmp
53 44: mov x10, #0x20 // #32
54 48: cmp x26, x10
55 4c: b.gt 0x000000000000007c
56 50: add x26, x26, #0x1
57 54: mov x10, #0x110 // #272
58 58: add x10, x1, x10
59 5c: lsl x11, x2, #3
60 60: ldr x11, [x10,x11] <-- faulting insn (f86b694b)
61 64: cbz x11, 0x000000000000007c
62 [...]
63
64 Meaning, the tests passed because commit ddb55992b04d ("arm64:
65 bpf: implement bpf_tail_call() helper") was using signed compares
66 instead of unsigned which as a result had the test wrongly passing.
67
68 Change this but also the tail call count test both into unsigned
69 and cap the index as u32. The latter we did as well in 90caccdd8cc0
70 ("bpf: fix bpf_tail_call() x64 JIT") and is needed in addition here,
71 too. Tested on HiSilicon Hi1616.
72
73 Result after patch:
74
75 # bpftool p d j i 268
76 [...]
77 38: ldr w10, [x1,x10]
78 3c: add w2, w2, #0x0
79 40: cmp w2, w10
80 44: b.cs 0x0000000000000080
81 48: mov x10, #0x20 // #32
82 4c: cmp x26, x10
83 50: b.hi 0x0000000000000080
84 54: add x26, x26, #0x1
85 58: mov x10, #0x110 // #272
86 5c: add x10, x1, x10
87 60: lsl x11, x2, #3
88 64: ldr x11, [x10,x11]
89 68: cbz x11, 0x0000000000000080
90 [...]
91
92 Fixes: ddb55992b04d ("arm64: bpf: implement bpf_tail_call() helper")
93 Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
94 Signed-off-by: Alexei Starovoitov <ast@kernel.org>
95 Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
96 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
97 ---
98 arch/arm64/net/bpf_jit_comp.c | 5 +++--
99 tools/testing/selftests/bpf/test_verifier.c | 26 ++++++++++++++++++++++++++
100 2 files changed, 29 insertions(+), 2 deletions(-)
101
102 --- a/arch/arm64/net/bpf_jit_comp.c
103 +++ b/arch/arm64/net/bpf_jit_comp.c
104 @@ -238,8 +238,9 @@ static int emit_bpf_tail_call(struct jit
105 off = offsetof(struct bpf_array, map.max_entries);
106 emit_a64_mov_i64(tmp, off, ctx);
107 emit(A64_LDR32(tmp, r2, tmp), ctx);
108 + emit(A64_MOV(0, r3, r3), ctx);
109 emit(A64_CMP(0, r3, tmp), ctx);
110 - emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
111 + emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
112
113 /* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
114 * goto out;
115 @@ -247,7 +248,7 @@ static int emit_bpf_tail_call(struct jit
116 */
117 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
118 emit(A64_CMP(1, tcc, tmp), ctx);
119 - emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
120 + emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
121 emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
122
123 /* prog = array->ptrs[index];
124 --- a/tools/testing/selftests/bpf/test_verifier.c
125 +++ b/tools/testing/selftests/bpf/test_verifier.c
126 @@ -2258,6 +2258,32 @@ static struct bpf_test tests[] = {
127 .result = ACCEPT,
128 },
129 {
130 + "runtime/jit: pass negative index to tail_call",
131 + .insns = {
132 + BPF_MOV64_IMM(BPF_REG_3, -1),
133 + BPF_LD_MAP_FD(BPF_REG_2, 0),
134 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
135 + BPF_FUNC_tail_call),
136 + BPF_MOV64_IMM(BPF_REG_0, 0),
137 + BPF_EXIT_INSN(),
138 + },
139 + .fixup_prog = { 1 },
140 + .result = ACCEPT,
141 + },
142 + {
143 + "runtime/jit: pass > 32bit index to tail_call",
144 + .insns = {
145 + BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
146 + BPF_LD_MAP_FD(BPF_REG_2, 0),
147 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
148 + BPF_FUNC_tail_call),
149 + BPF_MOV64_IMM(BPF_REG_0, 0),
150 + BPF_EXIT_INSN(),
151 + },
152 + .fixup_prog = { 2 },
153 + .result = ACCEPT,
154 + },
155 + {
156 "stack pointer arithmetic",
157 .insns = {
158 BPF_MOV64_IMM(BPF_REG_1, 4),