]> git.ipfire.org Git - thirdparty/kernel/stable.git/blob - tools/testing/selftests/bpf/verifier/ref_tracking.c
Merge tag 'drm/tegra/for-5.1-rc5' of git://anongit.freedesktop.org/tegra/linux into...
[thirdparty/kernel/stable.git] / tools / testing / selftests / bpf / verifier / ref_tracking.c
/* Verifier test-table entries for socket reference tracking.  Each
 * entry is one test program: .insns is the BPF instruction sequence,
 * .result/.errstr the expected verifier verdict.  BPF_SK_LOOKUP is a
 * macro (defined elsewhere in the selftests) that leaves a socket
 * reference in r0 — presumably a bpf_sk_lookup_tcp() call sequence
 * like the open-coded one in "direct access for lookup" below.
 */
/* Reference lives only in r6 at exit -> verifier must flag the leak. */
1 {
2 "reference tracking: leak potential reference",
3 .insns = {
4 BPF_SK_LOOKUP,
5 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
6 BPF_EXIT_INSN(),
7 },
8 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
9 .errstr = "Unreleased reference",
10 .result = REJECT,
11 },
/* Spilling the unchecked reference to the stack does not release it. */
12 {
13 "reference tracking: leak potential reference on stack",
14 .insns = {
15 BPF_SK_LOOKUP,
16 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
17 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
18 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
19 BPF_MOV64_IMM(BPF_REG_0, 0),
20 BPF_EXIT_INSN(),
21 },
22 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
23 .errstr = "Unreleased reference",
24 .result = REJECT,
25 },
/* Overwriting the spilled slot with zero does not release it either. */
26 {
27 "reference tracking: leak potential reference on stack 2",
28 .insns = {
29 BPF_SK_LOOKUP,
30 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
31 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
32 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
33 BPF_MOV64_IMM(BPF_REG_0, 0),
34 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
35 BPF_EXIT_INSN(),
36 },
37 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
38 .errstr = "Unreleased reference",
39 .result = REJECT,
40 },
/* Zeroing r0 discards the only copy of the reference. */
41 {
42 "reference tracking: zero potential reference",
43 .insns = {
44 BPF_SK_LOOKUP,
45 BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
46 BPF_EXIT_INSN(),
47 },
48 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
49 .errstr = "Unreleased reference",
50 .result = REJECT,
51 },
/* Copy the pointer to r7, then zero both copies -> reference lost. */
52 {
53 "reference tracking: copy and zero potential references",
54 .insns = {
55 BPF_SK_LOOKUP,
56 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
57 BPF_MOV64_IMM(BPF_REG_0, 0),
58 BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
59 BPF_EXIT_INSN(),
60 },
61 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
62 .errstr = "Unreleased reference",
63 .result = REJECT,
64 },
/* sk_release() on a possibly-NULL pointer: the verifier requires a
 * NULL check first (r1 is sock_or_null, helper expects sock).
 */
65 {
66 "reference tracking: release reference without check",
67 .insns = {
68 BPF_SK_LOOKUP,
69 /* reference in r0 may be NULL */
70 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
71 BPF_MOV64_IMM(BPF_REG_2, 0),
72 BPF_EMIT_CALL(BPF_FUNC_sk_release),
73 BPF_EXIT_INSN(),
74 },
75 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
76 .errstr = "type=sock_or_null expected=sock",
77 .result = REJECT,
78 },
/* Correct pattern: if (sk) sk_release(sk) -> accepted. */
79 {
80 "reference tracking: release reference",
81 .insns = {
82 BPF_SK_LOOKUP,
83 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
84 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
85 BPF_EMIT_CALL(BPF_FUNC_sk_release),
86 BPF_EXIT_INSN(),
87 },
88 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
89 .result = ACCEPT,
90 },
/* Same, with the branch inverted: exit early when sk == NULL. */
91 {
92 "reference tracking: release reference 2",
93 .insns = {
94 BPF_SK_LOOKUP,
95 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
96 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
97 BPF_EXIT_INSN(),
98 BPF_EMIT_CALL(BPF_FUNC_sk_release),
99 BPF_EXIT_INSN(),
100 },
101 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
102 .result = ACCEPT,
103 },
/* Double release: per .errstr, after the first sk_release() the saved
 * copy in r6 is no longer a sock (type=inv), so the second call fails.
 */
104 {
105 "reference tracking: release reference twice",
106 .insns = {
107 BPF_SK_LOOKUP,
108 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
109 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
110 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
111 BPF_EMIT_CALL(BPF_FUNC_sk_release),
112 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
113 BPF_EMIT_CALL(BPF_FUNC_sk_release),
114 BPF_EXIT_INSN(),
115 },
116 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
117 .errstr = "type=inv expected=sock",
118 .result = REJECT,
119 },
/* Double release reachable only on the non-NULL branch -> still rejected. */
120 {
121 "reference tracking: release reference twice inside branch",
122 .insns = {
123 BPF_SK_LOOKUP,
124 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
125 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
126 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
127 BPF_EMIT_CALL(BPF_FUNC_sk_release),
128 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
129 BPF_EMIT_CALL(BPF_FUNC_sk_release),
130 BPF_EXIT_INSN(),
131 },
132 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
133 .errstr = "type=inv expected=sock",
134 .result = REJECT,
135 },
/* Packet-bounds check plus lookup; the mark == 0 branch exits while
 * still holding the unchecked reference -> leak on one subbranch.
 */
136 {
137 "reference tracking: alloc, check, free in one subbranch",
138 .insns = {
139 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
140 offsetof(struct __sk_buff, data)),
141 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
142 offsetof(struct __sk_buff, data_end)),
143 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
144 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
145 /* if (offsetof(skb, mark) > data_len) exit; */
146 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
147 BPF_EXIT_INSN(),
148 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
149 offsetof(struct __sk_buff, mark)),
150 BPF_SK_LOOKUP,
151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
152 /* Leak reference in R0 */
153 BPF_EXIT_INSN(),
154 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
155 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
156 BPF_EMIT_CALL(BPF_FUNC_sk_release),
157 BPF_EXIT_INSN(),
158 },
159 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
160 .errstr = "Unreleased reference",
161 .result = REJECT,
162 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
163 },
/* Same shape, but both subbranches check and release -> accepted. */
164 {
165 "reference tracking: alloc, check, free in both subbranches",
166 .insns = {
167 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
168 offsetof(struct __sk_buff, data)),
169 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
170 offsetof(struct __sk_buff, data_end)),
171 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
172 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16),
173 /* if (offsetof(skb, mark) > data_len) exit; */
174 BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1),
175 BPF_EXIT_INSN(),
176 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
177 offsetof(struct __sk_buff, mark)),
178 BPF_SK_LOOKUP,
179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
180 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
181 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
182 BPF_EMIT_CALL(BPF_FUNC_sk_release),
183 BPF_EXIT_INSN(),
184 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
185 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
186 BPF_EMIT_CALL(BPF_FUNC_sk_release),
187 BPF_EXIT_INSN(),
188 },
189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
190 .result = ACCEPT,
191 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
192 },
/* BPF-to-BPF call tests (BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, ...)
 * is a pseudo-call into a subprog): the reference acquired in the
 * caller may be released inside the callee.
 */
193 {
194 "reference tracking in call: free reference in subprog",
195 .insns = {
196 BPF_SK_LOOKUP,
197 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
198 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
199 BPF_MOV64_IMM(BPF_REG_0, 0),
200 BPF_EXIT_INSN(),
201
202 /* subprog 1 */
203 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
204 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
205 BPF_EMIT_CALL(BPF_FUNC_sk_release),
206 BPF_EXIT_INSN(),
207 },
208 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
209 .result = ACCEPT,
210 },
/* Released in the subprog AND again by the caller -> double release. */
211 {
212 "reference tracking in call: free reference in subprog and outside",
213 .insns = {
214 BPF_SK_LOOKUP,
215 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
216 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
217 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
218 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
219 BPF_EMIT_CALL(BPF_FUNC_sk_release),
220 BPF_EXIT_INSN(),
221
222 /* subprog 1 */
223 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
224 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1),
225 BPF_EMIT_CALL(BPF_FUNC_sk_release),
226 BPF_EXIT_INSN(),
227 },
228 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
229 .errstr = "type=inv expected=sock",
230 .result = REJECT,
231 },
/* Subprog acquires the socket and spills it into the caller's stack
 * frame without anyone releasing it -> leak.
 */
232 {
233 "reference tracking in call: alloc & leak reference in subprog",
234 .insns = {
235 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
237 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
238 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
239 BPF_MOV64_IMM(BPF_REG_0, 0),
240 BPF_EXIT_INSN(),
241
242 /* subprog 1 */
243 BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
244 BPF_SK_LOOKUP,
245 /* spill unchecked sk_ptr into stack of caller */
246 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
247 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
248 BPF_EXIT_INSN(),
249 },
250 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
251 .errstr = "Unreleased reference",
252 .result = REJECT,
253 },
/* Subprog returns the acquired socket in r0; caller checks and
 * releases it -> accepted (retval is the socket pointer).
 */
254 {
255 "reference tracking in call: alloc in subprog, release outside",
256 .insns = {
257 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
258 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
259 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
260 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
261 BPF_EMIT_CALL(BPF_FUNC_sk_release),
262 BPF_EXIT_INSN(),
263
264 /* subprog 1 */
265 BPF_SK_LOOKUP,
266 BPF_EXIT_INSN(), /* return sk */
267 },
268 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
269 .retval = POINTER_VALUE,
270 .result = ACCEPT,
271 },
/* Two-level call chain: subprog 2 acquires the socket, subprog 1
 * spills it through a pointer into the caller's stack.  Without a
 * release anywhere, the reference leaks.
 */
272 {
273 "reference tracking in call: sk_ptr leak into caller stack",
274 .insns = {
275 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
277 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
278 BPF_MOV64_IMM(BPF_REG_0, 0),
279 BPF_EXIT_INSN(),
280
281 /* subprog 1 */
282 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
283 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
284 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
286 /* spill unchecked sk_ptr into stack of caller */
287 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
289 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
290 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
291 BPF_EXIT_INSN(),
292
293 /* subprog 2 */
294 BPF_SK_LOOKUP,
295 BPF_EXIT_INSN(),
296 },
297 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
298 .errstr = "Unreleased reference",
299 .result = REJECT,
300 },
/* Same spill, but subprog 1 then NULL-checks, reloads the spilled
 * pointer and releases it -> accepted.
 */
301 {
302 "reference tracking in call: sk_ptr spill into caller stack",
303 .insns = {
304 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
305 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
306 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
307 BPF_MOV64_IMM(BPF_REG_0, 0),
308 BPF_EXIT_INSN(),
309
310 /* subprog 1 */
311 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
312 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
313 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8),
315 /* spill unchecked sk_ptr into stack of caller */
316 BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8),
318 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
319 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
321 /* now the sk_ptr is verified, free the reference */
322 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0),
323 BPF_EMIT_CALL(BPF_FUNC_sk_release),
324 BPF_EXIT_INSN(),
325
326 /* subprog 2 */
327 BPF_SK_LOOKUP,
328 BPF_EXIT_INSN(),
329 },
330 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
331 .result = ACCEPT,
332 },
/* LD_ABS/LD_IND interaction: legacy packet loads are allowed once the
 * reference has been released...
 */
333 {
334 "reference tracking: allow LD_ABS",
335 .insns = {
336 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
337 BPF_SK_LOOKUP,
338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
339 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
340 BPF_EMIT_CALL(BPF_FUNC_sk_release),
341 BPF_LD_ABS(BPF_B, 0),
342 BPF_LD_ABS(BPF_H, 0),
343 BPF_LD_ABS(BPF_W, 0),
344 BPF_EXIT_INSN(),
345 },
346 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
347 .result = ACCEPT,
348 },
/* ...but forbidden while a socket reference is still held (see .errstr). */
349 {
350 "reference tracking: forbid LD_ABS while holding reference",
351 .insns = {
352 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
353 BPF_SK_LOOKUP,
354 BPF_LD_ABS(BPF_B, 0),
355 BPF_LD_ABS(BPF_H, 0),
356 BPF_LD_ABS(BPF_W, 0),
357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
358 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
359 BPF_EMIT_CALL(BPF_FUNC_sk_release),
360 BPF_EXIT_INSN(),
361 },
362 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
363 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
364 .result = REJECT,
365 },
/* LD_IND after the release -> accepted (program returns 1). */
366 {
367 "reference tracking: allow LD_IND",
368 .insns = {
369 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
370 BPF_SK_LOOKUP,
371 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
372 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
373 BPF_EMIT_CALL(BPF_FUNC_sk_release),
374 BPF_MOV64_IMM(BPF_REG_7, 1),
375 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
376 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
377 BPF_EXIT_INSN(),
378 },
379 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
380 .result = ACCEPT,
381 .retval = 1,
382 },
/* LD_IND while the reference (saved in r4) is still held -> rejected. */
383 {
384 "reference tracking: forbid LD_IND while holding reference",
385 .insns = {
386 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
387 BPF_SK_LOOKUP,
388 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
389 BPF_MOV64_IMM(BPF_REG_7, 1),
390 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
391 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
392 BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),
393 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
394 BPF_EMIT_CALL(BPF_FUNC_sk_release),
395 BPF_EXIT_INSN(),
396 },
397 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
398 .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references",
399 .result = REJECT,
400 },
/* Tail-call interaction.  .fixup_prog1 patches the BPF_LD_MAP_FD slot
 * at the given instruction index with a real prog-array map fd.
 * Tail call taken only on the sk == NULL path -> no leak, accepted.
 */
401 {
402 "reference tracking: check reference or tail call",
403 .insns = {
404 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
405 BPF_SK_LOOKUP,
406 /* if (sk) bpf_sk_release() */
407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
408 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
409 /* bpf_tail_call() */
410 BPF_MOV64_IMM(BPF_REG_3, 2),
411 BPF_LD_MAP_FD(BPF_REG_2, 0),
412 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
413 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
414 BPF_MOV64_IMM(BPF_REG_0, 0),
415 BPF_EXIT_INSN(),
416 BPF_EMIT_CALL(BPF_FUNC_sk_release),
417 BPF_EXIT_INSN(),
418 },
419 .fixup_prog1 = { 17 },
420 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
421 .result = ACCEPT,
422 },
/* Release first, then tail call -> no reference outstanding, accepted. */
423 {
424 "reference tracking: release reference then tail call",
425 .insns = {
426 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
427 BPF_SK_LOOKUP,
428 /* if (sk) bpf_sk_release() */
429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
430 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
431 BPF_EMIT_CALL(BPF_FUNC_sk_release),
432 /* bpf_tail_call() */
433 BPF_MOV64_IMM(BPF_REG_3, 2),
434 BPF_LD_MAP_FD(BPF_REG_2, 0),
435 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
436 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
437 BPF_MOV64_IMM(BPF_REG_0, 0),
438 BPF_EXIT_INSN(),
439 },
440 .fixup_prog1 = { 18 },
441 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
442 .result = ACCEPT,
443 },
/* Tail call while a possibly-non-NULL reference is outstanding: the
 * release after the tail call does not help (per .errstr, a taken
 * tail call would leak it) -> rejected.
 */
444 {
445 "reference tracking: leak possible reference over tail call",
446 .insns = {
447 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
448 /* Look up socket and store in REG_6 */
449 BPF_SK_LOOKUP,
450 /* bpf_tail_call() */
451 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
452 BPF_MOV64_IMM(BPF_REG_3, 2),
453 BPF_LD_MAP_FD(BPF_REG_2, 0),
454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
455 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
456 BPF_MOV64_IMM(BPF_REG_0, 0),
457 /* if (sk) bpf_sk_release() */
458 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
459 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
460 BPF_EMIT_CALL(BPF_FUNC_sk_release),
461 BPF_EXIT_INSN(),
462 },
463 .fixup_prog1 = { 16 },
464 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
465 .errstr = "tail_call would lead to reference leak",
466 .result = REJECT,
467 },
/* Even a NULL-checked (held) reference may not be carried over a
 * tail call -> rejected.
 */
468 {
469 "reference tracking: leak checked reference over tail call",
470 .insns = {
471 BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
472 /* Look up socket and store in REG_6 */
473 BPF_SK_LOOKUP,
474 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
475 /* if (!sk) goto end */
476 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
477 /* bpf_tail_call() */
478 BPF_MOV64_IMM(BPF_REG_3, 0),
479 BPF_LD_MAP_FD(BPF_REG_2, 0),
480 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
481 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
482 BPF_MOV64_IMM(BPF_REG_0, 0),
483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
484 BPF_EMIT_CALL(BPF_FUNC_sk_release),
485 BPF_EXIT_INSN(),
486 },
487 .fixup_prog1 = { 17 },
488 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
489 .errstr = "tail_call would lead to reference leak",
490 .result = REJECT,
491 },
/* Pointer arithmetic on a socket pointer is prohibited — both before
 * the NULL check (sock_or_null)...
 */
492 {
493 "reference tracking: mangle and release sock_or_null",
494 .insns = {
495 BPF_SK_LOOKUP,
496 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
497 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
498 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
499 BPF_EMIT_CALL(BPF_FUNC_sk_release),
500 BPF_EXIT_INSN(),
501 },
502 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
503 .errstr = "R1 pointer arithmetic on sock_or_null prohibited",
504 .result = REJECT,
505 },
/* ...and after it (sock). */
506 {
507 "reference tracking: mangle and release sock",
508 .insns = {
509 BPF_SK_LOOKUP,
510 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
513 BPF_EMIT_CALL(BPF_FUNC_sk_release),
514 BPF_EXIT_INSN(),
515 },
516 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
517 .errstr = "R1 pointer arithmetic on sock prohibited",
518 .result = REJECT,
519 },
/* 4-byte read at offset 4 of the checked socket -> allowed. */
520 {
521 "reference tracking: access member",
522 .insns = {
523 BPF_SK_LOOKUP,
524 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
525 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
526 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
527 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
528 BPF_EMIT_CALL(BPF_FUNC_sk_release),
529 BPF_EXIT_INSN(),
530 },
531 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
532 .result = ACCEPT,
533 },
/* Writing to a socket member (here bpf_sock.mark) is rejected. */
534 {
535 "reference tracking: write to member",
536 .insns = {
537 BPF_SK_LOOKUP,
538 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
540 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
541 BPF_LD_IMM64(BPF_REG_2, 42),
542 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2,
543 offsetof(struct bpf_sock, mark)),
544 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
545 BPF_EMIT_CALL(BPF_FUNC_sk_release),
546 BPF_LD_IMM64(BPF_REG_0, 0),
547 BPF_EXIT_INSN(),
548 },
549 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
550 .errstr = "cannot write into sock",
551 .result = REJECT,
552 },
/* 8-byte read at offset 0 is an invalid access size for sock. */
553 {
554 "reference tracking: invalid 64-bit access of member",
555 .insns = {
556 BPF_SK_LOOKUP,
557 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
559 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
560 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
561 BPF_EMIT_CALL(BPF_FUNC_sk_release),
562 BPF_EXIT_INSN(),
563 },
564 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
565 .errstr = "invalid sock access off=0 size=8",
566 .result = REJECT,
567 },
/* Use-after-free: load through r1 after sk_release(r1) -> !read_ok. */
568 {
569 "reference tracking: access after release",
570 .insns = {
571 BPF_SK_LOOKUP,
572 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
573 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
574 BPF_EMIT_CALL(BPF_FUNC_sk_release),
575 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
576 BPF_EXIT_INSN(),
577 },
578 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
579 .errstr = "!read_ok",
580 .result = REJECT,
581 },
/* Open-coded lookup: r2 (tuple pointer) comes straight from packet
 * data after a bounds check — sk_lookup_tcp() arguments set up
 * explicitly instead of via the BPF_SK_LOOKUP macro.  Accepted.
 */
582 {
583 "reference tracking: direct access for lookup",
584 .insns = {
585 /* Check that the packet is at least 64B long */
586 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
587 offsetof(struct __sk_buff, data)),
588 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
589 offsetof(struct __sk_buff, data_end)),
590 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64),
592 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
593 /* sk = sk_lookup_tcp(ctx, skb->data, ...) */
594 BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),
595 BPF_MOV64_IMM(BPF_REG_4, 0),
596 BPF_MOV64_IMM(BPF_REG_5, 0),
597 BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp),
598 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
599 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
600 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
601 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
602 BPF_EMIT_CALL(BPF_FUNC_sk_release),
603 BPF_EXIT_INSN(),
604 },
605 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
606 .result = ACCEPT,
607 },
/* Pointers derived from a referenced socket (via bpf_tcp_sock /
 * bpf_sk_fullsock) must become invalid once the underlying reference
 * is released.  Here: read tp->snd_cwnd after sk_release(sk).
 */
608 {
609 "reference tracking: use ptr from bpf_tcp_sock() after release",
610 .insns = {
611 BPF_SK_LOOKUP,
612 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
613 BPF_EXIT_INSN(),
614 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
615 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
616 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
617 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
618 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
619 BPF_EMIT_CALL(BPF_FUNC_sk_release),
620 BPF_EXIT_INSN(),
621 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
623 BPF_EMIT_CALL(BPF_FUNC_sk_release),
624 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
625 BPF_EXIT_INSN(),
626 },
627 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
628 .result = REJECT,
629 .errstr = "invalid mem access",
630 },
/* Same pattern with bpf_sk_fullsock(): read after release -> rejected. */
631 {
632 "reference tracking: use ptr from bpf_sk_fullsock() after release",
633 .insns = {
634 BPF_SK_LOOKUP,
635 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
636 BPF_EXIT_INSN(),
637 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
638 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
639 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
640 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
642 BPF_EMIT_CALL(BPF_FUNC_sk_release),
643 BPF_EXIT_INSN(),
644 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
645 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
646 BPF_EMIT_CALL(BPF_FUNC_sk_release),
647 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
648 BPF_EXIT_INSN(),
649 },
650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
651 .result = REJECT,
652 .errstr = "invalid mem access",
653 },
/* Pointer derived twice (sk -> tp -> fullsock) still dies with the
 * original reference; the NULL check on r6 after release is too late.
 */
654 {
655 "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
656 .insns = {
657 BPF_SK_LOOKUP,
658 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
659 BPF_EXIT_INSN(),
660 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
661 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
662 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
663 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
664 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
665 BPF_EMIT_CALL(BPF_FUNC_sk_release),
666 BPF_EXIT_INSN(),
667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
668 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
669 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
670 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
671 BPF_EMIT_CALL(BPF_FUNC_sk_release),
672 BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
673 BPF_EXIT_INSN(),
674 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
675 BPF_EXIT_INSN(),
676 },
677 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
678 .result = REJECT,
679 .errstr = "invalid mem access",
680 },
/* Releasing via the derived tp pointer also kills sk (r6) -> the
 * subsequent read through r6 is rejected.
 */
681 {
682 "reference tracking: use sk after bpf_sk_release(tp)",
683 .insns = {
684 BPF_SK_LOOKUP,
685 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
686 BPF_EXIT_INSN(),
687 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
688 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
689 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
690 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
692 BPF_EMIT_CALL(BPF_FUNC_sk_release),
693 BPF_EXIT_INSN(),
694 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
695 BPF_EMIT_CALL(BPF_FUNC_sk_release),
696 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
697 BPF_EXIT_INSN(),
698 },
699 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
700 .result = REJECT,
701 .errstr = "invalid mem access",
702 },
/* Listener socket obtained via bpf_get_listener_sock() remains usable
 * after releasing the original sk -> accepted.
 */
703 {
704 "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
705 .insns = {
706 BPF_SK_LOOKUP,
707 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
708 BPF_EXIT_INSN(),
709 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
710 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
711 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
712 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
713 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
714 BPF_EMIT_CALL(BPF_FUNC_sk_release),
715 BPF_EXIT_INSN(),
716 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
717 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
718 BPF_EMIT_CALL(BPF_FUNC_sk_release),
719 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
720 BPF_EXIT_INSN(),
721 },
722 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
723 .result = ACCEPT,
724 },
/* sk_release() on the listener pointer from bpf_get_listener_sock():
 * it does not carry its own reference, so releasing it is rejected.
 */
725 {
726 "reference tracking: bpf_sk_release(listen_sk)",
727 .insns = {
728 BPF_SK_LOOKUP,
729 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
730 BPF_EXIT_INSN(),
731 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
732 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
733 BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
734 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
735 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
736 BPF_EMIT_CALL(BPF_FUNC_sk_release),
737 BPF_EXIT_INSN(),
738 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
739 BPF_EMIT_CALL(BPF_FUNC_sk_release),
740 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
741 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
742 BPF_EMIT_CALL(BPF_FUNC_sk_release),
743 BPF_EXIT_INSN(),
744 },
745 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
746 .result = REJECT,
747 .errstr = "reference has not been acquired before",
748 },
749 {
750 /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
751 "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
752 .insns = {
753 BPF_SK_LOOKUP,
754 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
755 BPF_EXIT_INSN(),
756 BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
757 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
758 BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
759 BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
761 BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
762 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
763 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
765 BPF_EMIT_CALL(BPF_FUNC_sk_release),
766 BPF_EXIT_INSN(),
/* Only r7 (fullsock) was NULL-checked; r8 (tcp_sock) may still be
 * NULL here, so the snd_cwnd load is an invalid access.
 */
767 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
768 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
769 BPF_EMIT_CALL(BPF_FUNC_sk_release),
770 BPF_EXIT_INSN(),
771 },
772 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
773 .result = REJECT,
774 .errstr = "invalid mem access",
775 },