1 /*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10
11 #include <endian.h>
12 #include <asm/types.h>
13 #include <linux/types.h>
14 #include <stdint.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 #include <errno.h>
19 #include <string.h>
20 #include <stddef.h>
21 #include <stdbool.h>
22 #include <sched.h>
23
24 #include <sys/capability.h>
25 #include <sys/resource.h>
26
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
31
32 #include <bpf/bpf.h>
33
34 #ifdef HAVE_GENHDR
35 # include "autoconf.h"
36 #else
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
39 # endif
40 #endif
41
42 #include "../../../include/linux/filter.h"
43
44 #ifndef ARRAY_SIZE
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
46 #endif
47
48 #define MAX_INSNS 512
49 #define MAX_FIXUPS 8
50 #define MAX_NR_MAPS 4
51
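/* Per-test flags: F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose
 * expected outcome assumes the architecture handles unaligned accesses
 * efficiently (see CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS above), while
 * F_LOAD_WITH_STRICT_ALIGNMENT loads the program with strict alignment
 * checking so that misaligned-access tests are rejected on every
 * architecture.
 */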
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
54
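/* One verifier test case: a short instruction sequence, optional fixup
 * arrays listing the instruction indices where the harness patches in
 * file descriptors of freshly created maps, and the expected verdict
 * (plus an error-message substring) for privileged and unprivileged
 * program loads.
 */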
55 struct bpf_test {
56 const char *descr;
57 struct bpf_insn insns[MAX_INSNS];
58 int fixup_map1[MAX_FIXUPS];
59 int fixup_map2[MAX_FIXUPS];
60 int fixup_prog[MAX_FIXUPS];
61 int fixup_map_in_map[MAX_FIXUPS];
62 const char *errstr;
63 const char *errstr_unpriv;
64 enum {
65 UNDEF,
66 ACCEPT,
67 REJECT
68 } result, result_unpriv;
69 enum bpf_prog_type prog_type;
70 uint8_t flags;
71 };
72
73 /* Note we want this to be 64-bit aligned so that the end of our array is
74 * actually the end of the structure.
75 */
76 #define MAX_ENTRIES 11
77
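/* Value layout of the larger test maps (those patched in via fixup_map2). */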
78 struct test_val {
79 unsigned int index;
80 int foo[MAX_ENTRIES];
81 };
82
83 static struct bpf_test tests[] = {
84 {
85 "add+sub+mul",
86 .insns = {
87 BPF_MOV64_IMM(BPF_REG_1, 1),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 BPF_MOV64_IMM(BPF_REG_2, 3),
90 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
94 BPF_EXIT_INSN(),
95 },
96 .result = ACCEPT,
97 },
98 {
99 "unreachable",
100 .insns = {
101 BPF_EXIT_INSN(),
102 BPF_EXIT_INSN(),
103 },
104 .errstr = "unreachable",
105 .result = REJECT,
106 },
107 {
108 "unreachable2",
109 .insns = {
110 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
112 BPF_EXIT_INSN(),
113 },
114 .errstr = "unreachable",
115 .result = REJECT,
116 },
117 {
118 "out of range jump",
119 .insns = {
120 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
121 BPF_EXIT_INSN(),
122 },
123 .errstr = "jump out of range",
124 .result = REJECT,
125 },
126 {
127 "out of range jump2",
128 .insns = {
129 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
130 BPF_EXIT_INSN(),
131 },
132 .errstr = "jump out of range",
133 .result = REJECT,
134 },
135 {
136 "test1 ld_imm64",
137 .insns = {
138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 0),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_LD_IMM64(BPF_REG_0, 1),
143 BPF_MOV64_IMM(BPF_REG_0, 2),
144 BPF_EXIT_INSN(),
145 },
146 .errstr = "invalid BPF_LD_IMM insn",
147 .errstr_unpriv = "R1 pointer comparison",
148 .result = REJECT,
149 },
150 {
151 "test2 ld_imm64",
152 .insns = {
153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 0),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_LD_IMM64(BPF_REG_0, 1),
158 BPF_EXIT_INSN(),
159 },
160 .errstr = "invalid BPF_LD_IMM insn",
161 .errstr_unpriv = "R1 pointer comparison",
162 .result = REJECT,
163 },
164 {
165 "test3 ld_imm64",
166 .insns = {
167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 0),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_LD_IMM64(BPF_REG_0, 1),
173 BPF_EXIT_INSN(),
174 },
175 .errstr = "invalid bpf_ld_imm64 insn",
176 .result = REJECT,
177 },
178 {
179 "test4 ld_imm64",
180 .insns = {
181 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
182 BPF_EXIT_INSN(),
183 },
184 .errstr = "invalid bpf_ld_imm64 insn",
185 .result = REJECT,
186 },
187 {
188 "test5 ld_imm64",
189 .insns = {
190 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
191 },
192 .errstr = "invalid bpf_ld_imm64 insn",
193 .result = REJECT,
194 },
195 {
196 "test6 ld_imm64",
197 .insns = {
198 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
200 BPF_EXIT_INSN(),
201 },
202 .result = ACCEPT,
203 },
204 {
205 "test7 ld_imm64",
206 .insns = {
207 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
209 BPF_EXIT_INSN(),
210 },
211 .result = ACCEPT,
212 },
213 {
214 "test8 ld_imm64",
215 .insns = {
216 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
218 BPF_EXIT_INSN(),
219 },
220 .errstr = "uses reserved fields",
221 .result = REJECT,
222 },
223 {
224 "test9 ld_imm64",
225 .insns = {
226 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
228 BPF_EXIT_INSN(),
229 },
230 .errstr = "invalid bpf_ld_imm64 insn",
231 .result = REJECT,
232 },
233 {
234 "test10 ld_imm64",
235 .insns = {
236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
238 BPF_EXIT_INSN(),
239 },
240 .errstr = "invalid bpf_ld_imm64 insn",
241 .result = REJECT,
242 },
243 {
244 "test11 ld_imm64",
245 .insns = {
246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
248 BPF_EXIT_INSN(),
249 },
250 .errstr = "invalid bpf_ld_imm64 insn",
251 .result = REJECT,
252 },
253 {
254 "test12 ld_imm64",
255 .insns = {
256 BPF_MOV64_IMM(BPF_REG_1, 0),
257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
259 BPF_EXIT_INSN(),
260 },
261 .errstr = "not pointing to valid bpf_map",
262 .result = REJECT,
263 },
264 {
265 "test13 ld_imm64",
266 .insns = {
267 BPF_MOV64_IMM(BPF_REG_1, 0),
268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
270 BPF_EXIT_INSN(),
271 },
272 .errstr = "invalid bpf_ld_imm64 insn",
273 .result = REJECT,
274 },
275 {
276 "no bpf_exit",
277 .insns = {
278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
279 },
280 .errstr = "jump out of range",
281 .result = REJECT,
282 },
283 {
284 "loop (back-edge)",
285 .insns = {
286 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
287 BPF_EXIT_INSN(),
288 },
289 .errstr = "back-edge",
290 .result = REJECT,
291 },
292 {
293 "loop2 (back-edge)",
294 .insns = {
295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
297 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
298 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
299 BPF_EXIT_INSN(),
300 },
301 .errstr = "back-edge",
302 .result = REJECT,
303 },
304 {
305 "conditional loop",
306 .insns = {
307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
311 BPF_EXIT_INSN(),
312 },
313 .errstr = "back-edge",
314 .result = REJECT,
315 },
316 {
317 "read uninitialized register",
318 .insns = {
319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
320 BPF_EXIT_INSN(),
321 },
322 .errstr = "R2 !read_ok",
323 .result = REJECT,
324 },
325 {
326 "read invalid register",
327 .insns = {
328 BPF_MOV64_REG(BPF_REG_0, -1),
329 BPF_EXIT_INSN(),
330 },
331 .errstr = "R15 is invalid",
332 .result = REJECT,
333 },
334 {
335 "program doesn't init R0 before exit",
336 .insns = {
337 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
338 BPF_EXIT_INSN(),
339 },
340 .errstr = "R0 !read_ok",
341 .result = REJECT,
342 },
343 {
344 "program doesn't init R0 before exit in all branches",
345 .insns = {
346 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
347 BPF_MOV64_IMM(BPF_REG_0, 1),
348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
349 BPF_EXIT_INSN(),
350 },
351 .errstr = "R0 !read_ok",
352 .errstr_unpriv = "R1 pointer comparison",
353 .result = REJECT,
354 },
355 {
356 "stack out of bounds",
357 .insns = {
358 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
359 BPF_EXIT_INSN(),
360 },
361 .errstr = "invalid stack",
362 .result = REJECT,
363 },
364 {
365 "invalid call insn1",
366 .insns = {
367 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
368 BPF_EXIT_INSN(),
369 },
370 .errstr = "BPF_CALL uses reserved",
371 .result = REJECT,
372 },
373 {
374 "invalid call insn2",
375 .insns = {
376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
377 BPF_EXIT_INSN(),
378 },
379 .errstr = "BPF_CALL uses reserved",
380 .result = REJECT,
381 },
382 {
383 "invalid function call",
384 .insns = {
385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
386 BPF_EXIT_INSN(),
387 },
388 .errstr = "invalid func unknown#1234567",
389 .result = REJECT,
390 },
391 {
392 "uninitialized stack1",
393 .insns = {
394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
396 BPF_LD_MAP_FD(BPF_REG_1, 0),
397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
398 BPF_FUNC_map_lookup_elem),
399 BPF_EXIT_INSN(),
400 },
401 .fixup_map1 = { 2 },
402 .errstr = "invalid indirect read from stack",
403 .result = REJECT,
404 },
405 {
406 "uninitialized stack2",
407 .insns = {
408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
409 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
410 BPF_EXIT_INSN(),
411 },
412 .errstr = "invalid read from stack",
413 .result = REJECT,
414 },
415 {
416 "invalid fp arithmetic",
417 /* If this ever gets changed, make sure JITs can deal with it. */
418 .insns = {
419 BPF_MOV64_IMM(BPF_REG_0, 0),
420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
421 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 BPF_EXIT_INSN(),
424 },
425 .errstr_unpriv = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
428 .result = REJECT,
429 },
430 {
431 "non-invalid fp arithmetic",
432 .insns = {
433 BPF_MOV64_IMM(BPF_REG_0, 0),
434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
435 BPF_EXIT_INSN(),
436 },
437 .result = ACCEPT,
438 },
439 {
440 "invalid argument register",
441 .insns = {
442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid),
444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
445 BPF_FUNC_get_cgroup_classid),
446 BPF_EXIT_INSN(),
447 },
448 .errstr = "R1 !read_ok",
449 .result = REJECT,
450 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 },
452 {
453 "non-invalid argument register",
454 .insns = {
455 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
457 BPF_FUNC_get_cgroup_classid),
458 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
460 BPF_FUNC_get_cgroup_classid),
461 BPF_EXIT_INSN(),
462 },
463 .result = ACCEPT,
464 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
465 },
466 {
467 "check valid spill/fill",
468 .insns = {
469 /* spill R1(ctx) into stack */
470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
471 /* fill it back into R2 */
472 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
473 /* should be able to access R0 = *(R2 + 8) */
474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
476 BPF_EXIT_INSN(),
477 },
478 .errstr_unpriv = "R0 leaks addr",
479 .result = ACCEPT,
480 .result_unpriv = REJECT,
481 },
482 {
483 "check valid spill/fill, skb mark",
484 .insns = {
485 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
486 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
488 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
489 offsetof(struct __sk_buff, mark)),
490 BPF_EXIT_INSN(),
491 },
492 .result = ACCEPT,
493 .result_unpriv = ACCEPT,
494 },
495 {
496 "check corrupted spill/fill",
497 .insns = {
498 /* spill R1(ctx) into stack */
499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
500 /* mess up the R1 pointer on the stack */
501 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
502 /* the fill back into R0 should fail */
503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
504 BPF_EXIT_INSN(),
505 },
506 .errstr_unpriv = "attempt to corrupt spilled",
507 .errstr = "corrupted spill",
508 .result = REJECT,
509 },
510 {
511 "invalid src register in STX",
512 .insns = {
513 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
514 BPF_EXIT_INSN(),
515 },
516 .errstr = "R15 is invalid",
517 .result = REJECT,
518 },
519 {
520 "invalid dst register in STX",
521 .insns = {
522 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
523 BPF_EXIT_INSN(),
524 },
525 .errstr = "R14 is invalid",
526 .result = REJECT,
527 },
528 {
529 "invalid dst register in ST",
530 .insns = {
531 BPF_ST_MEM(BPF_B, 14, -1, -1),
532 BPF_EXIT_INSN(),
533 },
534 .errstr = "R14 is invalid",
535 .result = REJECT,
536 },
537 {
538 "invalid src register in LDX",
539 .insns = {
540 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
541 BPF_EXIT_INSN(),
542 },
543 .errstr = "R12 is invalid",
544 .result = REJECT,
545 },
546 {
547 "invalid dst register in LDX",
548 .insns = {
549 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
550 BPF_EXIT_INSN(),
551 },
552 .errstr = "R11 is invalid",
553 .result = REJECT,
554 },
555 {
556 "junk insn",
557 .insns = {
558 BPF_RAW_INSN(0, 0, 0, 0, 0),
559 BPF_EXIT_INSN(),
560 },
561 .errstr = "invalid BPF_LD_IMM",
562 .result = REJECT,
563 },
564 {
565 "junk insn2",
566 .insns = {
567 BPF_RAW_INSN(1, 0, 0, 0, 0),
568 BPF_EXIT_INSN(),
569 },
570 .errstr = "BPF_LDX uses reserved fields",
571 .result = REJECT,
572 },
573 {
574 "junk insn3",
575 .insns = {
576 BPF_RAW_INSN(-1, 0, 0, 0, 0),
577 BPF_EXIT_INSN(),
578 },
579 .errstr = "invalid BPF_ALU opcode f0",
580 .result = REJECT,
581 },
582 {
583 "junk insn4",
584 .insns = {
585 BPF_RAW_INSN(-1, -1, -1, -1, -1),
586 BPF_EXIT_INSN(),
587 },
588 .errstr = "invalid BPF_ALU opcode f0",
589 .result = REJECT,
590 },
591 {
592 "junk insn5",
593 .insns = {
594 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
595 BPF_EXIT_INSN(),
596 },
597 .errstr = "BPF_ALU uses reserved fields",
598 .result = REJECT,
599 },
600 {
601 "misaligned read from stack",
602 .insns = {
603 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
604 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
605 BPF_EXIT_INSN(),
606 },
607 .errstr = "misaligned stack access",
608 .result = REJECT,
609 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 },
611 {
612 "invalid map_fd for function call",
613 .insns = {
614 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
615 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
617 BPF_LD_MAP_FD(BPF_REG_1, 0),
618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
619 BPF_FUNC_map_delete_elem),
620 BPF_EXIT_INSN(),
621 },
622 .errstr = "fd 0 is not pointing to valid bpf_map",
623 .result = REJECT,
624 },
625 {
626 "don't check return value before access",
627 .insns = {
628 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
629 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
630 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
631 BPF_LD_MAP_FD(BPF_REG_1, 0),
632 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
633 BPF_FUNC_map_lookup_elem),
634 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
635 BPF_EXIT_INSN(),
636 },
637 .fixup_map1 = { 3 },
638 .errstr = "R0 invalid mem access 'map_value_or_null'",
639 .result = REJECT,
640 },
641 {
642 "access memory with incorrect alignment",
643 .insns = {
644 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
645 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
646 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
647 BPF_LD_MAP_FD(BPF_REG_1, 0),
648 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
649 BPF_FUNC_map_lookup_elem),
650 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
651 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
652 BPF_EXIT_INSN(),
653 },
654 .fixup_map1 = { 3 },
655 .errstr = "misaligned value access",
656 .result = REJECT,
657 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
658 },
659 {
660 "sometimes access memory with incorrect alignment",
661 .insns = {
662 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
665 BPF_LD_MAP_FD(BPF_REG_1, 0),
666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
667 BPF_FUNC_map_lookup_elem),
668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
669 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
670 BPF_EXIT_INSN(),
671 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
672 BPF_EXIT_INSN(),
673 },
674 .fixup_map1 = { 3 },
675 .errstr = "R0 invalid mem access",
676 .errstr_unpriv = "R0 leaks addr",
677 .result = REJECT,
678 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
679 },
680 {
681 "jump test 1",
682 .insns = {
683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
684 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
685 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
686 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
687 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
688 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
689 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
690 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
692 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
693 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
694 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
695 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
696 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
697 BPF_MOV64_IMM(BPF_REG_0, 0),
698 BPF_EXIT_INSN(),
699 },
700 .errstr_unpriv = "R1 pointer comparison",
701 .result_unpriv = REJECT,
702 .result = ACCEPT,
703 },
704 {
705 "jump test 2",
706 .insns = {
707 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
708 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
709 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
710 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
711 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
712 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
713 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
715 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
716 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
717 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
718 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
719 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
720 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
721 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
722 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
723 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
724 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
725 BPF_MOV64_IMM(BPF_REG_0, 0),
726 BPF_EXIT_INSN(),
727 },
728 .errstr_unpriv = "R1 pointer comparison",
729 .result_unpriv = REJECT,
730 .result = ACCEPT,
731 },
732 {
733 "jump test 3",
734 .insns = {
735 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
737 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
738 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
739 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
740 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
741 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
742 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
743 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
744 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
745 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
746 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
747 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
748 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
749 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
750 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
751 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
752 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
753 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
754 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
755 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
756 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
757 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
759 BPF_LD_MAP_FD(BPF_REG_1, 0),
760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
761 BPF_FUNC_map_delete_elem),
762 BPF_EXIT_INSN(),
763 },
764 .fixup_map1 = { 24 },
765 .errstr_unpriv = "R1 pointer comparison",
766 .result_unpriv = REJECT,
767 .result = ACCEPT,
768 },
769 {
770 "jump test 4",
771 .insns = {
772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
811 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
812 BPF_MOV64_IMM(BPF_REG_0, 0),
813 BPF_EXIT_INSN(),
814 },
815 .errstr_unpriv = "R1 pointer comparison",
816 .result_unpriv = REJECT,
817 .result = ACCEPT,
818 },
819 {
820 "jump test 5",
821 .insns = {
822 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
823 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
824 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
825 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
826 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
827 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
828 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
829 BPF_MOV64_IMM(BPF_REG_0, 0),
830 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
831 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
832 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
833 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
834 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
835 BPF_MOV64_IMM(BPF_REG_0, 0),
836 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
837 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
838 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
839 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
840 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
841 BPF_MOV64_IMM(BPF_REG_0, 0),
842 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
843 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
844 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
845 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
846 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
847 BPF_MOV64_IMM(BPF_REG_0, 0),
848 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
849 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
850 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
851 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
852 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
853 BPF_MOV64_IMM(BPF_REG_0, 0),
854 BPF_EXIT_INSN(),
855 },
856 .errstr_unpriv = "R1 pointer comparison",
857 .result_unpriv = REJECT,
858 .result = ACCEPT,
859 },
860 {
861 "access skb fields ok",
862 .insns = {
863 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
864 offsetof(struct __sk_buff, len)),
865 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
866 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
867 offsetof(struct __sk_buff, mark)),
868 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
869 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
870 offsetof(struct __sk_buff, pkt_type)),
871 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
872 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
873 offsetof(struct __sk_buff, queue_mapping)),
874 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
875 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
876 offsetof(struct __sk_buff, protocol)),
877 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
878 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
879 offsetof(struct __sk_buff, vlan_present)),
880 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
881 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
882 offsetof(struct __sk_buff, vlan_tci)),
883 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
884 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
885 offsetof(struct __sk_buff, napi_id)),
886 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
887 BPF_EXIT_INSN(),
888 },
889 .result = ACCEPT,
890 },
891 {
892 "access skb fields bad1",
893 .insns = {
894 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
895 BPF_EXIT_INSN(),
896 },
897 .errstr = "invalid bpf_context access",
898 .result = REJECT,
899 },
900 {
901 "access skb fields bad2",
902 .insns = {
903 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
904 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
905 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
906 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
907 BPF_LD_MAP_FD(BPF_REG_1, 0),
908 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
909 BPF_FUNC_map_lookup_elem),
910 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
911 BPF_EXIT_INSN(),
912 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
913 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
914 offsetof(struct __sk_buff, pkt_type)),
915 BPF_EXIT_INSN(),
916 },
917 .fixup_map1 = { 4 },
918 .errstr = "different pointers",
919 .errstr_unpriv = "R1 pointer comparison",
920 .result = REJECT,
921 },
922 {
923 "access skb fields bad3",
924 .insns = {
925 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
926 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
927 offsetof(struct __sk_buff, pkt_type)),
928 BPF_EXIT_INSN(),
929 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
930 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
932 BPF_LD_MAP_FD(BPF_REG_1, 0),
933 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
934 BPF_FUNC_map_lookup_elem),
935 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
936 BPF_EXIT_INSN(),
937 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
938 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
939 },
940 .fixup_map1 = { 6 },
941 .errstr = "different pointers",
942 .errstr_unpriv = "R1 pointer comparison",
943 .result = REJECT,
944 },
945 {
946 "access skb fields bad4",
947 .insns = {
948 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
949 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
950 offsetof(struct __sk_buff, len)),
951 BPF_MOV64_IMM(BPF_REG_0, 0),
952 BPF_EXIT_INSN(),
953 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
954 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
955 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
956 BPF_LD_MAP_FD(BPF_REG_1, 0),
957 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
958 BPF_FUNC_map_lookup_elem),
959 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
960 BPF_EXIT_INSN(),
961 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
962 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
963 },
964 .fixup_map1 = { 7 },
965 .errstr = "different pointers",
966 .errstr_unpriv = "R1 pointer comparison",
967 .result = REJECT,
968 },
969 {
970 "check skb->mark is not writeable by sockets",
971 .insns = {
972 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
973 offsetof(struct __sk_buff, mark)),
974 BPF_EXIT_INSN(),
975 },
976 .errstr = "invalid bpf_context access",
977 .errstr_unpriv = "R1 leaks addr",
978 .result = REJECT,
979 },
980 {
981 "check skb->tc_index is not writeable by sockets",
982 .insns = {
983 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
984 offsetof(struct __sk_buff, tc_index)),
985 BPF_EXIT_INSN(),
986 },
987 .errstr = "invalid bpf_context access",
988 .errstr_unpriv = "R1 leaks addr",
989 .result = REJECT,
990 },
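/* The cb[] tests below store to and load from every byte, half-word,
 * word and double-word offset of skb->cb, the scratch area a socket
 * filter may both read and write; misaligned or out-of-range context
 * offsets must still be rejected.
 */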
991 {
992 "check cb access: byte",
993 .insns = {
994 BPF_MOV64_IMM(BPF_REG_0, 0),
995 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
996 offsetof(struct __sk_buff, cb[0])),
997 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
998 offsetof(struct __sk_buff, cb[0]) + 1),
999 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1000 offsetof(struct __sk_buff, cb[0]) + 2),
1001 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1002 offsetof(struct __sk_buff, cb[0]) + 3),
1003 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1004 offsetof(struct __sk_buff, cb[1])),
1005 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1006 offsetof(struct __sk_buff, cb[1]) + 1),
1007 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1008 offsetof(struct __sk_buff, cb[1]) + 2),
1009 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1010 offsetof(struct __sk_buff, cb[1]) + 3),
1011 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1012 offsetof(struct __sk_buff, cb[2])),
1013 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1014 offsetof(struct __sk_buff, cb[2]) + 1),
1015 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1016 offsetof(struct __sk_buff, cb[2]) + 2),
1017 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1018 offsetof(struct __sk_buff, cb[2]) + 3),
1019 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1020 offsetof(struct __sk_buff, cb[3])),
1021 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1022 offsetof(struct __sk_buff, cb[3]) + 1),
1023 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1024 offsetof(struct __sk_buff, cb[3]) + 2),
1025 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1026 offsetof(struct __sk_buff, cb[3]) + 3),
1027 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1028 offsetof(struct __sk_buff, cb[4])),
1029 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1030 offsetof(struct __sk_buff, cb[4]) + 1),
1031 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1032 offsetof(struct __sk_buff, cb[4]) + 2),
1033 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1034 offsetof(struct __sk_buff, cb[4]) + 3),
1035 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1036 offsetof(struct __sk_buff, cb[0])),
1037 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1038 offsetof(struct __sk_buff, cb[0]) + 1),
1039 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1040 offsetof(struct __sk_buff, cb[0]) + 2),
1041 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1042 offsetof(struct __sk_buff, cb[0]) + 3),
1043 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1044 offsetof(struct __sk_buff, cb[1])),
1045 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1046 offsetof(struct __sk_buff, cb[1]) + 1),
1047 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1048 offsetof(struct __sk_buff, cb[1]) + 2),
1049 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1050 offsetof(struct __sk_buff, cb[1]) + 3),
1051 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1052 offsetof(struct __sk_buff, cb[2])),
1053 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1054 offsetof(struct __sk_buff, cb[2]) + 1),
1055 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1056 offsetof(struct __sk_buff, cb[2]) + 2),
1057 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1058 offsetof(struct __sk_buff, cb[2]) + 3),
1059 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1060 offsetof(struct __sk_buff, cb[3])),
1061 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1062 offsetof(struct __sk_buff, cb[3]) + 1),
1063 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1064 offsetof(struct __sk_buff, cb[3]) + 2),
1065 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1066 offsetof(struct __sk_buff, cb[3]) + 3),
1067 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1068 offsetof(struct __sk_buff, cb[4])),
1069 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1070 offsetof(struct __sk_buff, cb[4]) + 1),
1071 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1072 offsetof(struct __sk_buff, cb[4]) + 2),
1073 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1074 offsetof(struct __sk_buff, cb[4]) + 3),
1075 BPF_EXIT_INSN(),
1076 },
1077 .result = ACCEPT,
1078 },
1079 {
1080 "__sk_buff->hash, offset 0, byte store not permitted",
1081 .insns = {
1082 BPF_MOV64_IMM(BPF_REG_0, 0),
1083 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1084 offsetof(struct __sk_buff, hash)),
1085 BPF_EXIT_INSN(),
1086 },
1087 .errstr = "invalid bpf_context access",
1088 .result = REJECT,
1089 },
1090 {
1091 "__sk_buff->tc_index, offset 3, byte store not permitted",
1092 .insns = {
1093 BPF_MOV64_IMM(BPF_REG_0, 0),
1094 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1095 offsetof(struct __sk_buff, tc_index) + 3),
1096 BPF_EXIT_INSN(),
1097 },
1098 .errstr = "invalid bpf_context access",
1099 .result = REJECT,
1100 },
1101 {
1102 "check skb->hash byte load permitted",
1103 .insns = {
1104 BPF_MOV64_IMM(BPF_REG_0, 0),
1105 #if __BYTE_ORDER == __LITTLE_ENDIAN
1106 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1107 offsetof(struct __sk_buff, hash)),
1108 #else
1109 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1110 offsetof(struct __sk_buff, hash) + 3),
1111 #endif
1112 BPF_EXIT_INSN(),
1113 },
1114 .result = ACCEPT,
1115 },
1116 {
1117 "check skb->hash byte load not permitted 1",
1118 .insns = {
1119 BPF_MOV64_IMM(BPF_REG_0, 0),
1120 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1121 offsetof(struct __sk_buff, hash) + 1),
1122 BPF_EXIT_INSN(),
1123 },
1124 .errstr = "invalid bpf_context access",
1125 .result = REJECT,
1126 },
1127 {
1128 "check skb->hash byte load not permitted 2",
1129 .insns = {
1130 BPF_MOV64_IMM(BPF_REG_0, 0),
1131 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1132 offsetof(struct __sk_buff, hash) + 2),
1133 BPF_EXIT_INSN(),
1134 },
1135 .errstr = "invalid bpf_context access",
1136 .result = REJECT,
1137 },
1138 {
1139 "check skb->hash byte load not permitted 3",
1140 .insns = {
1141 BPF_MOV64_IMM(BPF_REG_0, 0),
1142 #if __BYTE_ORDER == __LITTLE_ENDIAN
1143 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1144 offsetof(struct __sk_buff, hash) + 3),
1145 #else
1146 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1147 offsetof(struct __sk_buff, hash)),
1148 #endif
1149 BPF_EXIT_INSN(),
1150 },
1151 .errstr = "invalid bpf_context access",
1152 .result = REJECT,
1153 },
1154 {
1155 "check cb access: byte, wrong type",
1156 .insns = {
1157 BPF_MOV64_IMM(BPF_REG_0, 0),
1158 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1159 offsetof(struct __sk_buff, cb[0])),
1160 BPF_EXIT_INSN(),
1161 },
1162 .errstr = "invalid bpf_context access",
1163 .result = REJECT,
1164 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1165 },
1166 {
1167 "check cb access: half",
1168 .insns = {
1169 BPF_MOV64_IMM(BPF_REG_0, 0),
1170 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1171 offsetof(struct __sk_buff, cb[0])),
1172 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1173 offsetof(struct __sk_buff, cb[0]) + 2),
1174 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1175 offsetof(struct __sk_buff, cb[1])),
1176 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1177 offsetof(struct __sk_buff, cb[1]) + 2),
1178 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1179 offsetof(struct __sk_buff, cb[2])),
1180 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1181 offsetof(struct __sk_buff, cb[2]) + 2),
1182 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1183 offsetof(struct __sk_buff, cb[3])),
1184 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1185 offsetof(struct __sk_buff, cb[3]) + 2),
1186 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1187 offsetof(struct __sk_buff, cb[4])),
1188 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1189 offsetof(struct __sk_buff, cb[4]) + 2),
1190 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1191 offsetof(struct __sk_buff, cb[0])),
1192 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1193 offsetof(struct __sk_buff, cb[0]) + 2),
1194 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1195 offsetof(struct __sk_buff, cb[1])),
1196 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1197 offsetof(struct __sk_buff, cb[1]) + 2),
1198 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1199 offsetof(struct __sk_buff, cb[2])),
1200 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1201 offsetof(struct __sk_buff, cb[2]) + 2),
1202 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1203 offsetof(struct __sk_buff, cb[3])),
1204 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1205 offsetof(struct __sk_buff, cb[3]) + 2),
1206 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1207 offsetof(struct __sk_buff, cb[4])),
1208 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1209 offsetof(struct __sk_buff, cb[4]) + 2),
1210 BPF_EXIT_INSN(),
1211 },
1212 .result = ACCEPT,
1213 },
1214 {
1215 "check cb access: half, unaligned",
1216 .insns = {
1217 BPF_MOV64_IMM(BPF_REG_0, 0),
1218 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1219 offsetof(struct __sk_buff, cb[0]) + 1),
1220 BPF_EXIT_INSN(),
1221 },
1222 .errstr = "misaligned context access",
1223 .result = REJECT,
1224 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1225 },
1226 {
1227 "check __sk_buff->hash, offset 0, half store not permitted",
1228 .insns = {
1229 BPF_MOV64_IMM(BPF_REG_0, 0),
1230 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1231 offsetof(struct __sk_buff, hash)),
1232 BPF_EXIT_INSN(),
1233 },
1234 .errstr = "invalid bpf_context access",
1235 .result = REJECT,
1236 },
1237 {
1238 "check __sk_buff->tc_index, offset 2, half store not permitted",
1239 .insns = {
1240 BPF_MOV64_IMM(BPF_REG_0, 0),
1241 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1242 offsetof(struct __sk_buff, tc_index) + 2),
1243 BPF_EXIT_INSN(),
1244 },
1245 .errstr = "invalid bpf_context access",
1246 .result = REJECT,
1247 },
1248 {
1249 "check skb->hash half load permitted",
1250 .insns = {
1251 BPF_MOV64_IMM(BPF_REG_0, 0),
1252 #if __BYTE_ORDER == __LITTLE_ENDIAN
1253 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1254 offsetof(struct __sk_buff, hash)),
1255 #else
1256 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1257 offsetof(struct __sk_buff, hash) + 2),
1258 #endif
1259 BPF_EXIT_INSN(),
1260 },
1261 .result = ACCEPT,
1262 },
1263 {
1264 "check skb->hash half load not permitted",
1265 .insns = {
1266 BPF_MOV64_IMM(BPF_REG_0, 0),
1267 #if __BYTE_ORDER == __LITTLE_ENDIAN
1268 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1269 offsetof(struct __sk_buff, hash) + 2),
1270 #else
1271 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1272 offsetof(struct __sk_buff, hash)),
1273 #endif
1274 BPF_EXIT_INSN(),
1275 },
1276 .errstr = "invalid bpf_context access",
1277 .result = REJECT,
1278 },
1279 {
1280 "check cb access: half, wrong type",
1281 .insns = {
1282 BPF_MOV64_IMM(BPF_REG_0, 0),
1283 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1284 offsetof(struct __sk_buff, cb[0])),
1285 BPF_EXIT_INSN(),
1286 },
1287 .errstr = "invalid bpf_context access",
1288 .result = REJECT,
1289 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1290 },
1291 {
1292 "check cb access: word",
1293 .insns = {
1294 BPF_MOV64_IMM(BPF_REG_0, 0),
1295 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1296 offsetof(struct __sk_buff, cb[0])),
1297 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1298 offsetof(struct __sk_buff, cb[1])),
1299 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1300 offsetof(struct __sk_buff, cb[2])),
1301 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1302 offsetof(struct __sk_buff, cb[3])),
1303 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1304 offsetof(struct __sk_buff, cb[4])),
1305 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1306 offsetof(struct __sk_buff, cb[0])),
1307 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1308 offsetof(struct __sk_buff, cb[1])),
1309 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1310 offsetof(struct __sk_buff, cb[2])),
1311 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1312 offsetof(struct __sk_buff, cb[3])),
1313 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1314 offsetof(struct __sk_buff, cb[4])),
1315 BPF_EXIT_INSN(),
1316 },
1317 .result = ACCEPT,
1318 },
1319 {
1320 "check cb access: word, unaligned 1",
1321 .insns = {
1322 BPF_MOV64_IMM(BPF_REG_0, 0),
1323 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1324 offsetof(struct __sk_buff, cb[0]) + 2),
1325 BPF_EXIT_INSN(),
1326 },
1327 .errstr = "misaligned context access",
1328 .result = REJECT,
1329 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1330 },
1331 {
1332 "check cb access: word, unaligned 2",
1333 .insns = {
1334 BPF_MOV64_IMM(BPF_REG_0, 0),
1335 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1336 offsetof(struct __sk_buff, cb[4]) + 1),
1337 BPF_EXIT_INSN(),
1338 },
1339 .errstr = "misaligned context access",
1340 .result = REJECT,
1341 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1342 },
1343 {
1344 "check cb access: word, unaligned 3",
1345 .insns = {
1346 BPF_MOV64_IMM(BPF_REG_0, 0),
1347 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1348 offsetof(struct __sk_buff, cb[4]) + 2),
1349 BPF_EXIT_INSN(),
1350 },
1351 .errstr = "misaligned context access",
1352 .result = REJECT,
1353 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1354 },
1355 {
1356 "check cb access: word, unaligned 4",
1357 .insns = {
1358 BPF_MOV64_IMM(BPF_REG_0, 0),
1359 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1360 offsetof(struct __sk_buff, cb[4]) + 3),
1361 BPF_EXIT_INSN(),
1362 },
1363 .errstr = "misaligned context access",
1364 .result = REJECT,
1365 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1366 },
1367 {
1368 "check cb access: double",
1369 .insns = {
1370 BPF_MOV64_IMM(BPF_REG_0, 0),
1371 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1372 offsetof(struct __sk_buff, cb[0])),
1373 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1374 offsetof(struct __sk_buff, cb[2])),
1375 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1376 offsetof(struct __sk_buff, cb[0])),
1377 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1378 offsetof(struct __sk_buff, cb[2])),
1379 BPF_EXIT_INSN(),
1380 },
1381 .result = ACCEPT,
1382 },
1383 {
1384 "check cb access: double, unaligned 1",
1385 .insns = {
1386 BPF_MOV64_IMM(BPF_REG_0, 0),
1387 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1388 offsetof(struct __sk_buff, cb[1])),
1389 BPF_EXIT_INSN(),
1390 },
1391 .errstr = "misaligned context access",
1392 .result = REJECT,
1393 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1394 },
1395 {
1396 "check cb access: double, unaligned 2",
1397 .insns = {
1398 BPF_MOV64_IMM(BPF_REG_0, 0),
1399 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1400 offsetof(struct __sk_buff, cb[3])),
1401 BPF_EXIT_INSN(),
1402 },
1403 .errstr = "misaligned context access",
1404 .result = REJECT,
1405 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1406 },
1407 {
1408 "check cb access: double, oob 1",
1409 .insns = {
1410 BPF_MOV64_IMM(BPF_REG_0, 0),
1411 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1412 offsetof(struct __sk_buff, cb[4])),
1413 BPF_EXIT_INSN(),
1414 },
1415 .errstr = "invalid bpf_context access",
1416 .result = REJECT,
1417 },
1418 {
1419 "check cb access: double, oob 2",
1420 .insns = {
1421 BPF_MOV64_IMM(BPF_REG_0, 0),
1422 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1423 offsetof(struct __sk_buff, cb[4])),
1424 BPF_EXIT_INSN(),
1425 },
1426 .errstr = "invalid bpf_context access",
1427 .result = REJECT,
1428 },
1429 {
1430 "check __sk_buff->ifindex dw store not permitted",
1431 .insns = {
1432 BPF_MOV64_IMM(BPF_REG_0, 0),
1433 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1434 offsetof(struct __sk_buff, ifindex)),
1435 BPF_EXIT_INSN(),
1436 },
1437 .errstr = "invalid bpf_context access",
1438 .result = REJECT,
1439 },
1440 {
1441 "check __sk_buff->ifindex dw load not permitted",
1442 .insns = {
1443 BPF_MOV64_IMM(BPF_REG_0, 0),
1444 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1445 offsetof(struct __sk_buff, ifindex)),
1446 BPF_EXIT_INSN(),
1447 },
1448 .errstr = "invalid bpf_context access",
1449 .result = REJECT,
1450 },
1451 {
1452 "check cb access: double, wrong type",
1453 .insns = {
1454 BPF_MOV64_IMM(BPF_REG_0, 0),
1455 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1456 offsetof(struct __sk_buff, cb[0])),
1457 BPF_EXIT_INSN(),
1458 },
1459 .errstr = "invalid bpf_context access",
1460 .result = REJECT,
1461 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1462 },
1463 {
1464 "check out of range skb->cb access",
1465 .insns = {
1466 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1467 offsetof(struct __sk_buff, cb[0]) + 256),
1468 BPF_EXIT_INSN(),
1469 },
1470 .errstr = "invalid bpf_context access",
1471 .errstr_unpriv = "",
1472 .result = REJECT,
1473 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1474 },
1475 {
1476 "write skb fields from socket prog",
1477 .insns = {
1478 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1479 offsetof(struct __sk_buff, cb[4])),
1480 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1481 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1482 offsetof(struct __sk_buff, mark)),
1483 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1484 offsetof(struct __sk_buff, tc_index)),
1485 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1486 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1487 offsetof(struct __sk_buff, cb[0])),
1488 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1489 offsetof(struct __sk_buff, cb[2])),
1490 BPF_EXIT_INSN(),
1491 },
1492 .result = ACCEPT,
1493 .errstr_unpriv = "R1 leaks addr",
1494 .result_unpriv = REJECT,
1495 },
1496 {
1497 "write skb fields from tc_cls_act prog",
1498 .insns = {
1499 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1500 offsetof(struct __sk_buff, cb[0])),
1501 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1502 offsetof(struct __sk_buff, mark)),
1503 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1504 offsetof(struct __sk_buff, tc_index)),
1505 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1506 offsetof(struct __sk_buff, tc_index)),
1507 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1508 offsetof(struct __sk_buff, cb[3])),
1509 BPF_EXIT_INSN(),
1510 },
1511 .errstr_unpriv = "",
1512 .result_unpriv = REJECT,
1513 .result = ACCEPT,
1514 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1515 },
1516 {
1517 "PTR_TO_STACK store/load",
1518 .insns = {
1519 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1521 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1522 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1523 BPF_EXIT_INSN(),
1524 },
1525 .result = ACCEPT,
1526 },
1527 {
1528 "PTR_TO_STACK store/load - bad alignment on off",
1529 .insns = {
1530 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1532 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1534 BPF_EXIT_INSN(),
1535 },
1536 .result = REJECT,
1537 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1538 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1539 },
1540 {
1541 "PTR_TO_STACK store/load - bad alignment on reg",
1542 .insns = {
1543 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1544 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1545 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1546 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1547 BPF_EXIT_INSN(),
1548 },
1549 .result = REJECT,
1550 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1551 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1552 },
1553 {
1554 "PTR_TO_STACK store/load - out of bounds low",
1555 .insns = {
1556 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1558 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1559 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1560 BPF_EXIT_INSN(),
1561 },
1562 .result = REJECT,
1563 .errstr = "invalid stack off=-79992 size=8",
1564 },
1565 {
1566 "PTR_TO_STACK store/load - out of bounds high",
1567 .insns = {
1568 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1569 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1570 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1571 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1572 BPF_EXIT_INSN(),
1573 },
1574 .result = REJECT,
1575 .errstr = "invalid stack off=0 size=8",
1576 },
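/* "unpriv:" tests exercise restrictions that only apply when the program
 * is loaded without CAP_SYS_ADMIN; result_unpriv and errstr_unpriv give
 * the expected outcome in that mode.
 */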
1577 {
1578 "unpriv: return pointer",
1579 .insns = {
1580 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1581 BPF_EXIT_INSN(),
1582 },
1583 .result = ACCEPT,
1584 .result_unpriv = REJECT,
1585 .errstr_unpriv = "R0 leaks addr",
1586 },
1587 {
1588 "unpriv: add const to pointer",
1589 .insns = {
1590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1591 BPF_MOV64_IMM(BPF_REG_0, 0),
1592 BPF_EXIT_INSN(),
1593 },
1594 .result = ACCEPT,
1595 },
1596 {
1597 "unpriv: add pointer to pointer",
1598 .insns = {
1599 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1600 BPF_MOV64_IMM(BPF_REG_0, 0),
1601 BPF_EXIT_INSN(),
1602 },
1603 .result = ACCEPT,
1604 .result_unpriv = REJECT,
1605 .errstr_unpriv = "R1 pointer += pointer",
1606 },
1607 {
1608 "unpriv: neg pointer",
1609 .insns = {
1610 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1611 BPF_MOV64_IMM(BPF_REG_0, 0),
1612 BPF_EXIT_INSN(),
1613 },
1614 .result = ACCEPT,
1615 .result_unpriv = REJECT,
1616 .errstr_unpriv = "R1 pointer arithmetic",
1617 },
1618 {
1619 "unpriv: cmp pointer with const",
1620 .insns = {
1621 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1622 BPF_MOV64_IMM(BPF_REG_0, 0),
1623 BPF_EXIT_INSN(),
1624 },
1625 .result = ACCEPT,
1626 .result_unpriv = REJECT,
1627 .errstr_unpriv = "R1 pointer comparison",
1628 },
1629 {
1630 "unpriv: cmp pointer with pointer",
1631 .insns = {
1632 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1633 BPF_MOV64_IMM(BPF_REG_0, 0),
1634 BPF_EXIT_INSN(),
1635 },
1636 .result = ACCEPT,
1637 .result_unpriv = REJECT,
1638 .errstr_unpriv = "R10 pointer comparison",
1639 },
1640 {
1641 "unpriv: check that printk is disallowed",
1642 .insns = {
1643 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1644 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1646 BPF_MOV64_IMM(BPF_REG_2, 8),
1647 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1648 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1649 BPF_FUNC_trace_printk),
1650 BPF_MOV64_IMM(BPF_REG_0, 0),
1651 BPF_EXIT_INSN(),
1652 },
1653 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1654 .result_unpriv = REJECT,
1655 .result = ACCEPT,
1656 },
1657 {
1658 "unpriv: pass pointer to helper function",
1659 .insns = {
1660 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1663 BPF_LD_MAP_FD(BPF_REG_1, 0),
1664 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1665 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1667 BPF_FUNC_map_update_elem),
1668 BPF_MOV64_IMM(BPF_REG_0, 0),
1669 BPF_EXIT_INSN(),
1670 },
1671 .fixup_map1 = { 3 },
1672 .errstr_unpriv = "R4 leaks addr",
1673 .result_unpriv = REJECT,
1674 .result = ACCEPT,
1675 },
1676 {
1677 "unpriv: indirectly pass pointer on stack to helper function",
1678 .insns = {
1679 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1680 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1681 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1682 BPF_LD_MAP_FD(BPF_REG_1, 0),
1683 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1684 BPF_FUNC_map_lookup_elem),
1685 BPF_MOV64_IMM(BPF_REG_0, 0),
1686 BPF_EXIT_INSN(),
1687 },
1688 .fixup_map1 = { 3 },
1689 .errstr = "invalid indirect read from stack off -8+0 size 8",
1690 .result = REJECT,
1691 },
1692 {
1693 "unpriv: mangle pointer on stack 1",
1694 .insns = {
1695 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1696 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1697 BPF_MOV64_IMM(BPF_REG_0, 0),
1698 BPF_EXIT_INSN(),
1699 },
1700 .errstr_unpriv = "attempt to corrupt spilled",
1701 .result_unpriv = REJECT,
1702 .result = ACCEPT,
1703 },
1704 {
1705 "unpriv: mangle pointer on stack 2",
1706 .insns = {
1707 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1708 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1709 BPF_MOV64_IMM(BPF_REG_0, 0),
1710 BPF_EXIT_INSN(),
1711 },
1712 .errstr_unpriv = "attempt to corrupt spilled",
1713 .result_unpriv = REJECT,
1714 .result = ACCEPT,
1715 },
1716 {
1717 "unpriv: read pointer from stack in small chunks",
1718 .insns = {
1719 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1720 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1721 BPF_MOV64_IMM(BPF_REG_0, 0),
1722 BPF_EXIT_INSN(),
1723 },
1724 .errstr = "invalid size",
1725 .result = REJECT,
1726 },
1727 {
1728 "unpriv: write pointer into ctx",
1729 .insns = {
1730 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1731 BPF_MOV64_IMM(BPF_REG_0, 0),
1732 BPF_EXIT_INSN(),
1733 },
1734 .errstr_unpriv = "R1 leaks addr",
1735 .result_unpriv = REJECT,
1736 .errstr = "invalid bpf_context access",
1737 .result = REJECT,
1738 },
1739 {
1740 "unpriv: spill/fill of ctx",
1741 .insns = {
1742 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1744 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1745 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1746 BPF_MOV64_IMM(BPF_REG_0, 0),
1747 BPF_EXIT_INSN(),
1748 },
1749 .result = ACCEPT,
1750 },
1751 {
1752 "unpriv: spill/fill of ctx 2",
1753 .insns = {
1754 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1756 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1757 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1758 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1759 BPF_FUNC_get_hash_recalc),
1760 BPF_EXIT_INSN(),
1761 },
1762 .result = ACCEPT,
1763 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1764 },
1765 {
1766 "unpriv: spill/fill of ctx 3",
1767 .insns = {
1768 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1770 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1771 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1772 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1773 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1774 BPF_FUNC_get_hash_recalc),
1775 BPF_EXIT_INSN(),
1776 },
1777 .result = REJECT,
1778 .errstr = "R1 type=fp expected=ctx",
1779 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1780 },
1781 {
1782 "unpriv: spill/fill of ctx 4",
1783 .insns = {
1784 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1785 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1786 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1787 BPF_MOV64_IMM(BPF_REG_0, 1),
1788 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
1789 BPF_REG_0, -8, 0),
1790 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1791 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1792 BPF_FUNC_get_hash_recalc),
1793 BPF_EXIT_INSN(),
1794 },
1795 .result = REJECT,
1796 .errstr = "R1 type=inv expected=ctx",
1797 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1798 },
1799 {
1800 "unpriv: spill/fill of different pointers stx",
1801 .insns = {
1802 BPF_MOV64_IMM(BPF_REG_3, 42),
1803 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1806 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1807 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
1808 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1809 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1810 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1811 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1812 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
1813 offsetof(struct __sk_buff, mark)),
1814 BPF_MOV64_IMM(BPF_REG_0, 0),
1815 BPF_EXIT_INSN(),
1816 },
1817 .result = REJECT,
1818 .errstr = "same insn cannot be used with different pointers",
1819 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1820 },
1821 {
1822 "unpriv: spill/fill of different pointers ldx",
1823 .insns = {
1824 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1826 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
1827 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
1829 -(__s32)offsetof(struct bpf_perf_event_data,
1830 sample_period) - 8),
1831 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
1832 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
1833 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
1834 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
1835 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
1836 offsetof(struct bpf_perf_event_data,
1837 sample_period)),
1838 BPF_MOV64_IMM(BPF_REG_0, 0),
1839 BPF_EXIT_INSN(),
1840 },
1841 .result = REJECT,
1842 .errstr = "same insn cannot be used with different pointers",
1843 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
1844 },
1845 {
1846 "unpriv: write pointer into map elem value",
1847 .insns = {
1848 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1851 BPF_LD_MAP_FD(BPF_REG_1, 0),
1852 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1853 BPF_FUNC_map_lookup_elem),
1854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
1855 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
1856 BPF_EXIT_INSN(),
1857 },
1858 .fixup_map1 = { 3 },
1859 .errstr_unpriv = "R0 leaks addr",
1860 .result_unpriv = REJECT,
1861 .result = ACCEPT,
1862 },
1863 {
1864 "unpriv: partial copy of pointer",
1865 .insns = {
1866 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
1867 BPF_MOV64_IMM(BPF_REG_0, 0),
1868 BPF_EXIT_INSN(),
1869 },
1870 .errstr_unpriv = "R10 partial copy",
1871 .result_unpriv = REJECT,
1872 .result = ACCEPT,
1873 },
1874 {
1875 "unpriv: pass pointer to tail_call",
1876 .insns = {
1877 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1878 BPF_LD_MAP_FD(BPF_REG_2, 0),
1879 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1880 BPF_FUNC_tail_call),
1881 BPF_MOV64_IMM(BPF_REG_0, 0),
1882 BPF_EXIT_INSN(),
1883 },
1884 .fixup_prog = { 1 },
1885 .errstr_unpriv = "R3 leaks addr into helper",
1886 .result_unpriv = REJECT,
1887 .result = ACCEPT,
1888 },
1889 {
1890 "unpriv: cmp map pointer with zero",
1891 .insns = {
1892 BPF_MOV64_IMM(BPF_REG_1, 0),
1893 BPF_LD_MAP_FD(BPF_REG_1, 0),
1894 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1895 BPF_MOV64_IMM(BPF_REG_0, 0),
1896 BPF_EXIT_INSN(),
1897 },
1898 .fixup_map1 = { 1 },
1899 .errstr_unpriv = "R1 pointer comparison",
1900 .result_unpriv = REJECT,
1901 .result = ACCEPT,
1902 },
1903 {
1904 "unpriv: write into frame pointer",
1905 .insns = {
1906 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
1907 BPF_MOV64_IMM(BPF_REG_0, 0),
1908 BPF_EXIT_INSN(),
1909 },
1910 .errstr = "frame pointer is read only",
1911 .result = REJECT,
1912 },
1913 {
1914 "unpriv: spill/fill frame pointer",
1915 .insns = {
1916 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1918 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
1919 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
1920 BPF_MOV64_IMM(BPF_REG_0, 0),
1921 BPF_EXIT_INSN(),
1922 },
1923 .errstr = "frame pointer is read only",
1924 .result = REJECT,
1925 },
1926 {
1927 "unpriv: cmp of frame pointer",
1928 .insns = {
1929 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
1930 BPF_MOV64_IMM(BPF_REG_0, 0),
1931 BPF_EXIT_INSN(),
1932 },
1933 .errstr_unpriv = "R10 pointer comparison",
1934 .result_unpriv = REJECT,
1935 .result = ACCEPT,
1936 },
1937 {
1938 "unpriv: adding of fp",
1939 .insns = {
1940 BPF_MOV64_IMM(BPF_REG_0, 0),
1941 BPF_MOV64_IMM(BPF_REG_1, 0),
1942 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1943 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
1944 BPF_EXIT_INSN(),
1945 },
1946 .result = ACCEPT,
1947 },
1948 {
1949 "unpriv: cmp of stack pointer",
1950 .insns = {
1951 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1953 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
1954 BPF_MOV64_IMM(BPF_REG_0, 0),
1955 BPF_EXIT_INSN(),
1956 },
1957 .errstr_unpriv = "R2 pointer comparison",
1958 .result_unpriv = REJECT,
1959 .result = ACCEPT,
1960 },
1961 {
1962 "stack pointer arithmetic",
1963 .insns = {
1964 BPF_MOV64_IMM(BPF_REG_1, 4),
1965 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
1966 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
1967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
1969 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1970 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
1971 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
1972 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
1973 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
1974 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
1975 BPF_MOV64_IMM(BPF_REG_0, 0),
1976 BPF_EXIT_INSN(),
1977 },
1978 .result = ACCEPT,
1979 },
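/* raw_stack tests: a pointer into the program stack is passed to
 * bpf_skb_load_bytes() as the destination buffer.  The verifier has to
 * check that the buffer lies inside the stack and that the length in R4 is
 * positive and bounded; reading the slot back afterwards is fine even
 * without prior initialization, because the helper fills it.
 */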
1980 {
1981 "raw_stack: no skb_load_bytes",
1982 .insns = {
1983 BPF_MOV64_IMM(BPF_REG_2, 4),
1984 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
1985 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
1986 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
1987 BPF_MOV64_IMM(BPF_REG_4, 8),
1988 /* Call to skb_load_bytes() omitted. */
1989 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
1990 BPF_EXIT_INSN(),
1991 },
1992 .result = REJECT,
1993 .errstr = "invalid read from stack off -8+0 size 8",
1994 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1995 },
1996 {
1997 "raw_stack: skb_load_bytes, negative len",
1998 .insns = {
1999 BPF_MOV64_IMM(BPF_REG_2, 4),
2000 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2001 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2002 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2003 BPF_MOV64_IMM(BPF_REG_4, -8),
2004 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2005 BPF_FUNC_skb_load_bytes),
2006 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2007 BPF_EXIT_INSN(),
2008 },
2009 .result = REJECT,
2010 .errstr = "R4 min value is negative",
2011 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2012 },
2013 {
2014 "raw_stack: skb_load_bytes, negative len 2",
2015 .insns = {
2016 BPF_MOV64_IMM(BPF_REG_2, 4),
2017 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2019 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2020 BPF_MOV64_IMM(BPF_REG_4, ~0),
2021 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2022 BPF_FUNC_skb_load_bytes),
2023 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2024 BPF_EXIT_INSN(),
2025 },
2026 .result = REJECT,
2027 .errstr = "R4 min value is negative",
2028 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2029 },
2030 {
2031 "raw_stack: skb_load_bytes, zero len",
2032 .insns = {
2033 BPF_MOV64_IMM(BPF_REG_2, 4),
2034 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2035 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2036 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2037 BPF_MOV64_IMM(BPF_REG_4, 0),
2038 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2039 BPF_FUNC_skb_load_bytes),
2040 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2041 BPF_EXIT_INSN(),
2042 },
2043 .result = REJECT,
2044 .errstr = "invalid stack type R3",
2045 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2046 },
2047 {
2048 "raw_stack: skb_load_bytes, no init",
2049 .insns = {
2050 BPF_MOV64_IMM(BPF_REG_2, 4),
2051 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2052 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2053 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2054 BPF_MOV64_IMM(BPF_REG_4, 8),
2055 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2056 BPF_FUNC_skb_load_bytes),
2057 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2058 BPF_EXIT_INSN(),
2059 },
2060 .result = ACCEPT,
2061 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2062 },
2063 {
2064 "raw_stack: skb_load_bytes, init",
2065 .insns = {
2066 BPF_MOV64_IMM(BPF_REG_2, 4),
2067 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2068 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2069 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2070 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2071 BPF_MOV64_IMM(BPF_REG_4, 8),
2072 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2073 BPF_FUNC_skb_load_bytes),
2074 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2075 BPF_EXIT_INSN(),
2076 },
2077 .result = ACCEPT,
2078 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2079 },
2080 {
2081 "raw_stack: skb_load_bytes, spilled regs around bounds",
2082 .insns = {
2083 BPF_MOV64_IMM(BPF_REG_2, 4),
2084 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2086 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2087 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2088 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2089 BPF_MOV64_IMM(BPF_REG_4, 8),
2090 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2091 BPF_FUNC_skb_load_bytes),
2092 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2093 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2094 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2095 offsetof(struct __sk_buff, mark)),
2096 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2097 offsetof(struct __sk_buff, priority)),
2098 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2099 BPF_EXIT_INSN(),
2100 },
2101 .result = ACCEPT,
2102 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2103 },
2104 {
2105 "raw_stack: skb_load_bytes, spilled regs corruption",
2106 .insns = {
2107 BPF_MOV64_IMM(BPF_REG_2, 4),
2108 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2110 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2111 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2112 BPF_MOV64_IMM(BPF_REG_4, 8),
2113 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2114 BPF_FUNC_skb_load_bytes),
2115 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2116 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2117 offsetof(struct __sk_buff, mark)),
2118 BPF_EXIT_INSN(),
2119 },
2120 .result = REJECT,
2121 .errstr = "R0 invalid mem access 'inv'",
2122 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2123 },
2124 {
2125 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2126 .insns = {
2127 BPF_MOV64_IMM(BPF_REG_2, 4),
2128 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2129 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2130 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2131 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2132 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2133 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2134 BPF_MOV64_IMM(BPF_REG_4, 8),
2135 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2136 BPF_FUNC_skb_load_bytes),
2137 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2138 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2139 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2140 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2141 offsetof(struct __sk_buff, mark)),
2142 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2143 offsetof(struct __sk_buff, priority)),
2144 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2145 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2146 offsetof(struct __sk_buff, pkt_type)),
2147 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2148 BPF_EXIT_INSN(),
2149 },
2150 .result = REJECT,
2151 .errstr = "R3 invalid mem access 'inv'",
2152 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2153 },
2154 {
2155 "raw_stack: skb_load_bytes, spilled regs + data",
2156 .insns = {
2157 BPF_MOV64_IMM(BPF_REG_2, 4),
2158 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2159 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2160 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2161 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2162 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2163 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2164 BPF_MOV64_IMM(BPF_REG_4, 8),
2165 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2166 BPF_FUNC_skb_load_bytes),
2167 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2168 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2169 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2170 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2171 offsetof(struct __sk_buff, mark)),
2172 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2173 offsetof(struct __sk_buff, priority)),
2174 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2175 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2176 BPF_EXIT_INSN(),
2177 },
2178 .result = ACCEPT,
2179 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2180 },
2181 {
2182 "raw_stack: skb_load_bytes, invalid access 1",
2183 .insns = {
2184 BPF_MOV64_IMM(BPF_REG_2, 4),
2185 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2186 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2187 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2188 BPF_MOV64_IMM(BPF_REG_4, 8),
2189 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2190 BPF_FUNC_skb_load_bytes),
2191 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2192 BPF_EXIT_INSN(),
2193 },
2194 .result = REJECT,
2195 .errstr = "invalid stack type R3 off=-513 access_size=8",
2196 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2197 },
2198 {
2199 "raw_stack: skb_load_bytes, invalid access 2",
2200 .insns = {
2201 BPF_MOV64_IMM(BPF_REG_2, 4),
2202 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2203 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2204 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2205 BPF_MOV64_IMM(BPF_REG_4, 8),
2206 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2207 BPF_FUNC_skb_load_bytes),
2208 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2209 BPF_EXIT_INSN(),
2210 },
2211 .result = REJECT,
2212 .errstr = "invalid stack type R3 off=-1 access_size=8",
2213 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2214 },
2215 {
2216 "raw_stack: skb_load_bytes, invalid access 3",
2217 .insns = {
2218 BPF_MOV64_IMM(BPF_REG_2, 4),
2219 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2221 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2222 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2224 BPF_FUNC_skb_load_bytes),
2225 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2226 BPF_EXIT_INSN(),
2227 },
2228 .result = REJECT,
2229 .errstr = "R4 min value is negative",
2230 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2231 },
2232 {
2233 "raw_stack: skb_load_bytes, invalid access 4",
2234 .insns = {
2235 BPF_MOV64_IMM(BPF_REG_2, 4),
2236 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2238 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2239 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2241 BPF_FUNC_skb_load_bytes),
2242 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2243 BPF_EXIT_INSN(),
2244 },
2245 .result = REJECT,
2246 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2247 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2248 },
2249 {
2250 "raw_stack: skb_load_bytes, invalid access 5",
2251 .insns = {
2252 BPF_MOV64_IMM(BPF_REG_2, 4),
2253 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2255 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2256 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2257 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2258 BPF_FUNC_skb_load_bytes),
2259 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2260 BPF_EXIT_INSN(),
2261 },
2262 .result = REJECT,
2263 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2264 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2265 },
2266 {
2267 "raw_stack: skb_load_bytes, invalid access 6",
2268 .insns = {
2269 BPF_MOV64_IMM(BPF_REG_2, 4),
2270 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2272 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2273 BPF_MOV64_IMM(BPF_REG_4, 0),
2274 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2275 BPF_FUNC_skb_load_bytes),
2276 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2277 BPF_EXIT_INSN(),
2278 },
2279 .result = REJECT,
2280 .errstr = "invalid stack type R3 off=-512 access_size=0",
2281 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2282 },
2283 {
2284 "raw_stack: skb_load_bytes, large access",
2285 .insns = {
2286 BPF_MOV64_IMM(BPF_REG_2, 4),
2287 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2289 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2290 BPF_MOV64_IMM(BPF_REG_4, 512),
2291 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2292 BPF_FUNC_skb_load_bytes),
2293 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2294 BPF_EXIT_INSN(),
2295 },
2296 .result = ACCEPT,
2297 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2298 },
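/* Direct packet access: skb->data and skb->data_end are loaded into
 * registers, a copy of the data pointer is advanced, and the copy is
 * compared against data_end before any packet byte is touched.  Roughly
 * the C pattern being exercised (illustrative sketch only, not part of the
 * test suite):
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + 8 > data_end)
 *		return 0;
 *	return *(u8 *)data;
 *
 * Accesses that skip the check, run past the verified range, or do
 * arithmetic on data_end itself must be rejected.
 */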
2299 {
2300 "direct packet access: test1",
2301 .insns = {
2302 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2303 offsetof(struct __sk_buff, data)),
2304 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2305 offsetof(struct __sk_buff, data_end)),
2306 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2308 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2309 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2310 BPF_MOV64_IMM(BPF_REG_0, 0),
2311 BPF_EXIT_INSN(),
2312 },
2313 .result = ACCEPT,
2314 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2315 },
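/* test2 adds variable offsets to the packet pointer: one derived from a
 * packet byte, one bounded to 15 bits by the <<49 / >>49 shift pair, and
 * re-checks the result against data_end before the final load.
 */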
2316 {
2317 "direct packet access: test2",
2318 .insns = {
2319 BPF_MOV64_IMM(BPF_REG_0, 1),
2320 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2321 offsetof(struct __sk_buff, data_end)),
2322 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2323 offsetof(struct __sk_buff, data)),
2324 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2326 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2327 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2328 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2329 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2330 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2331 offsetof(struct __sk_buff, data)),
2332 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2333 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2334 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2335 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2336 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2337 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2338 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2339 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2340 offsetof(struct __sk_buff, data_end)),
2341 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2342 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2343 BPF_MOV64_IMM(BPF_REG_0, 0),
2344 BPF_EXIT_INSN(),
2345 },
2346 .result = ACCEPT,
2347 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2348 },
2349 {
2350 "direct packet access: test3",
2351 .insns = {
2352 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2353 offsetof(struct __sk_buff, data)),
2354 BPF_MOV64_IMM(BPF_REG_0, 0),
2355 BPF_EXIT_INSN(),
2356 },
2357 .errstr = "invalid bpf_context access off=76",
2358 .result = REJECT,
2359 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2360 },
2361 {
2362 "direct packet access: test4 (write)",
2363 .insns = {
2364 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2365 offsetof(struct __sk_buff, data)),
2366 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2367 offsetof(struct __sk_buff, data_end)),
2368 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2369 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2370 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2371 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2372 BPF_MOV64_IMM(BPF_REG_0, 0),
2373 BPF_EXIT_INSN(),
2374 },
2375 .result = ACCEPT,
2376 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2377 },
2378 {
2379 "direct packet access: test5 (pkt_end >= reg, good access)",
2380 .insns = {
2381 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2382 offsetof(struct __sk_buff, data)),
2383 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2384 offsetof(struct __sk_buff, data_end)),
2385 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2386 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2387 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2388 BPF_MOV64_IMM(BPF_REG_0, 1),
2389 BPF_EXIT_INSN(),
2390 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2391 BPF_MOV64_IMM(BPF_REG_0, 0),
2392 BPF_EXIT_INSN(),
2393 },
2394 .result = ACCEPT,
2395 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2396 },
2397 {
2398 "direct packet access: test6 (pkt_end >= reg, bad access)",
2399 .insns = {
2400 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2401 offsetof(struct __sk_buff, data)),
2402 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2403 offsetof(struct __sk_buff, data_end)),
2404 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2405 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2406 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2407 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2408 BPF_MOV64_IMM(BPF_REG_0, 1),
2409 BPF_EXIT_INSN(),
2410 BPF_MOV64_IMM(BPF_REG_0, 0),
2411 BPF_EXIT_INSN(),
2412 },
2413 .errstr = "invalid access to packet",
2414 .result = REJECT,
2415 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2416 },
2417 {
2418 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2419 .insns = {
2420 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2421 offsetof(struct __sk_buff, data)),
2422 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2423 offsetof(struct __sk_buff, data_end)),
2424 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2426 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2427 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2428 BPF_MOV64_IMM(BPF_REG_0, 1),
2429 BPF_EXIT_INSN(),
2430 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2431 BPF_MOV64_IMM(BPF_REG_0, 0),
2432 BPF_EXIT_INSN(),
2433 },
2434 .errstr = "invalid access to packet",
2435 .result = REJECT,
2436 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2437 },
2438 {
2439 "direct packet access: test8 (double test, variant 1)",
2440 .insns = {
2441 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2442 offsetof(struct __sk_buff, data)),
2443 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2444 offsetof(struct __sk_buff, data_end)),
2445 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2446 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2447 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2448 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2449 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2450 BPF_MOV64_IMM(BPF_REG_0, 1),
2451 BPF_EXIT_INSN(),
2452 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2453 BPF_MOV64_IMM(BPF_REG_0, 0),
2454 BPF_EXIT_INSN(),
2455 },
2456 .result = ACCEPT,
2457 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2458 },
2459 {
2460 "direct packet access: test9 (double test, variant 2)",
2461 .insns = {
2462 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2463 offsetof(struct __sk_buff, data)),
2464 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2465 offsetof(struct __sk_buff, data_end)),
2466 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2468 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2469 BPF_MOV64_IMM(BPF_REG_0, 1),
2470 BPF_EXIT_INSN(),
2471 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2472 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2473 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2474 BPF_MOV64_IMM(BPF_REG_0, 0),
2475 BPF_EXIT_INSN(),
2476 },
2477 .result = ACCEPT,
2478 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2479 },
2480 {
2481 "direct packet access: test10 (write invalid)",
2482 .insns = {
2483 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2484 offsetof(struct __sk_buff, data)),
2485 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2486 offsetof(struct __sk_buff, data_end)),
2487 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2488 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2489 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2490 BPF_MOV64_IMM(BPF_REG_0, 0),
2491 BPF_EXIT_INSN(),
2492 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2493 BPF_MOV64_IMM(BPF_REG_0, 0),
2494 BPF_EXIT_INSN(),
2495 },
2496 .errstr = "invalid access to packet",
2497 .result = REJECT,
2498 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2499 },
2500 {
2501 "direct packet access: test11 (shift, good access)",
2502 .insns = {
2503 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2504 offsetof(struct __sk_buff, data)),
2505 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2506 offsetof(struct __sk_buff, data_end)),
2507 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2508 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2509 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2510 BPF_MOV64_IMM(BPF_REG_3, 144),
2511 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2513 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2514 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2515 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2516 BPF_MOV64_IMM(BPF_REG_0, 1),
2517 BPF_EXIT_INSN(),
2518 BPF_MOV64_IMM(BPF_REG_0, 0),
2519 BPF_EXIT_INSN(),
2520 },
2521 .result = ACCEPT,
2522 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2523 },
2524 {
2525 "direct packet access: test12 (and, good access)",
2526 .insns = {
2527 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2528 offsetof(struct __sk_buff, data)),
2529 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2530 offsetof(struct __sk_buff, data_end)),
2531 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2533 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2534 BPF_MOV64_IMM(BPF_REG_3, 144),
2535 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2536 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2537 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2538 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2539 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2540 BPF_MOV64_IMM(BPF_REG_0, 1),
2541 BPF_EXIT_INSN(),
2542 BPF_MOV64_IMM(BPF_REG_0, 0),
2543 BPF_EXIT_INSN(),
2544 },
2545 .result = ACCEPT,
2546 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2547 },
2548 {
2549 "direct packet access: test13 (branches, good access)",
2550 .insns = {
2551 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2552 offsetof(struct __sk_buff, data)),
2553 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2554 offsetof(struct __sk_buff, data_end)),
2555 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2556 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2557 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2558 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2559 offsetof(struct __sk_buff, mark)),
2560 BPF_MOV64_IMM(BPF_REG_4, 1),
2561 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2562 BPF_MOV64_IMM(BPF_REG_3, 14),
2563 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2564 BPF_MOV64_IMM(BPF_REG_3, 24),
2565 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2567 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2568 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2569 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2570 BPF_MOV64_IMM(BPF_REG_0, 1),
2571 BPF_EXIT_INSN(),
2572 BPF_MOV64_IMM(BPF_REG_0, 0),
2573 BPF_EXIT_INSN(),
2574 },
2575 .result = ACCEPT,
2576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2577 },
2578 {
2579 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2580 .insns = {
2581 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2582 offsetof(struct __sk_buff, data)),
2583 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2584 offsetof(struct __sk_buff, data_end)),
2585 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2587 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2588 BPF_MOV64_IMM(BPF_REG_5, 12),
2589 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2590 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2591 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2592 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2593 BPF_MOV64_IMM(BPF_REG_0, 1),
2594 BPF_EXIT_INSN(),
2595 BPF_MOV64_IMM(BPF_REG_0, 0),
2596 BPF_EXIT_INSN(),
2597 },
2598 .result = ACCEPT,
2599 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2600 },
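/* test15: an xadd on the stack slot holding a spilled packet pointer
 * invalidates the spill, so the value loaded back is unknown ('inv') and
 * the store through it has to be rejected.
 */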
2601 {
2602 "direct packet access: test15 (spill with xadd)",
2603 .insns = {
2604 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2605 offsetof(struct __sk_buff, data)),
2606 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2607 offsetof(struct __sk_buff, data_end)),
2608 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2609 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2610 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2611 BPF_MOV64_IMM(BPF_REG_5, 4096),
2612 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2613 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2614 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2615 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2616 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2617 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2618 BPF_MOV64_IMM(BPF_REG_0, 0),
2619 BPF_EXIT_INSN(),
2620 },
2621 .errstr = "R2 invalid mem access 'inv'",
2622 .result = REJECT,
2623 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2624 },
2625 {
2626 "direct packet access: test16 (arith on data_end)",
2627 .insns = {
2628 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2629 offsetof(struct __sk_buff, data)),
2630 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2631 offsetof(struct __sk_buff, data_end)),
2632 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2633 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2634 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2635 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2636 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2637 BPF_MOV64_IMM(BPF_REG_0, 0),
2638 BPF_EXIT_INSN(),
2639 },
2640 .errstr = "invalid access to packet",
2641 .result = REJECT,
2642 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2643 },
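/* test17: loaded with strict alignment checking; the first pass stores at
 * an aligned offset, the looped path bumps the pointer by one byte, and
 * state pruning must not skip the re-check that catches the now
 * misaligned store.
 */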
2644 {
2645 "direct packet access: test17 (pruning, alignment)",
2646 .insns = {
2647 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2648 offsetof(struct __sk_buff, data)),
2649 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2650 offsetof(struct __sk_buff, data_end)),
2651 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2652 offsetof(struct __sk_buff, mark)),
2653 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2654 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2655 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2656 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2657 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2658 BPF_MOV64_IMM(BPF_REG_0, 0),
2659 BPF_EXIT_INSN(),
2660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2661 BPF_JMP_A(-6),
2662 },
2663 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2664 .result = REJECT,
2665 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2666 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2667 },
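/* test18..test24 add unknown scalars and packet pointers in both operand
 * orders (imm += pkt_ptr, x += pkt_ptr) and check that the sum is still
 * tracked as a packet pointer with a usable range; test23 is the variant
 * the verifier has to reject.
 */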
2668 {
2669 "direct packet access: test18 (imm += pkt_ptr, 1)",
2670 .insns = {
2671 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2672 offsetof(struct __sk_buff, data)),
2673 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2674 offsetof(struct __sk_buff, data_end)),
2675 BPF_MOV64_IMM(BPF_REG_0, 8),
2676 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2677 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2678 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2679 BPF_MOV64_IMM(BPF_REG_0, 0),
2680 BPF_EXIT_INSN(),
2681 },
2682 .result = ACCEPT,
2683 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2684 },
2685 {
2686 "direct packet access: test19 (imm += pkt_ptr, 2)",
2687 .insns = {
2688 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2689 offsetof(struct __sk_buff, data)),
2690 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2691 offsetof(struct __sk_buff, data_end)),
2692 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2693 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2694 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2695 BPF_MOV64_IMM(BPF_REG_4, 4),
2696 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2697 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2698 BPF_MOV64_IMM(BPF_REG_0, 0),
2699 BPF_EXIT_INSN(),
2700 },
2701 .result = ACCEPT,
2702 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2703 },
2704 {
2705 "direct packet access: test20 (x += pkt_ptr, 1)",
2706 .insns = {
2707 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2708 offsetof(struct __sk_buff, data)),
2709 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2710 offsetof(struct __sk_buff, data_end)),
2711 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2712 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2713 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2714 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
2715 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2716 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2717 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2718 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2719 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2720 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2721 BPF_MOV64_IMM(BPF_REG_0, 0),
2722 BPF_EXIT_INSN(),
2723 },
2724 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2725 .result = ACCEPT,
2726 },
2727 {
2728 "direct packet access: test21 (x += pkt_ptr, 2)",
2729 .insns = {
2730 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2731 offsetof(struct __sk_buff, data)),
2732 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2733 offsetof(struct __sk_buff, data_end)),
2734 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2736 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2737 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2738 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2739 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2740 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
2741 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2742 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2744 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2745 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2746 BPF_MOV64_IMM(BPF_REG_0, 0),
2747 BPF_EXIT_INSN(),
2748 },
2749 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2750 .result = ACCEPT,
2751 },
2752 {
2753 "direct packet access: test22 (x += pkt_ptr, 3)",
2754 .insns = {
2755 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2756 offsetof(struct __sk_buff, data)),
2757 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2758 offsetof(struct __sk_buff, data_end)),
2759 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2760 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2761 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
2762 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
2763 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
2764 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
2765 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
2766 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2767 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2768 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2769 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
2770 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2771 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
2772 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
2773 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2774 BPF_MOV64_IMM(BPF_REG_2, 1),
2775 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
2776 BPF_MOV64_IMM(BPF_REG_0, 0),
2777 BPF_EXIT_INSN(),
2778 },
2779 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2780 .result = ACCEPT,
2781 },
2782 {
2783 "direct packet access: test23 (x += pkt_ptr, 4)",
2784 .insns = {
2785 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2786 offsetof(struct __sk_buff, data)),
2787 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2788 offsetof(struct __sk_buff, data_end)),
2789 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2790 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2791 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2792 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
2793 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2794 BPF_MOV64_IMM(BPF_REG_0, 31),
2795 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2796 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2797 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2798 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
2799 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2800 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2801 BPF_MOV64_IMM(BPF_REG_0, 0),
2802 BPF_EXIT_INSN(),
2803 },
2804 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2805 .result = REJECT,
2806 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
2807 },
2808 {
2809 "direct packet access: test24 (x += pkt_ptr, 5)",
2810 .insns = {
2811 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2812 offsetof(struct __sk_buff, data)),
2813 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2814 offsetof(struct __sk_buff, data_end)),
2815 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2816 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2817 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2818 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
2819 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2820 BPF_MOV64_IMM(BPF_REG_0, 64),
2821 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
2822 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2823 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
2824 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
2825 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2826 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
2827 BPF_MOV64_IMM(BPF_REG_0, 0),
2828 BPF_EXIT_INSN(),
2829 },
2830 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2831 .result = ACCEPT,
2832 },
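/* test25..test28 repeat the range-marking checks with the BPF_JLT and
 * BPF_JLE comparisons, mirroring the JGT/JGE tests above.
 */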
2833 {
2834 "direct packet access: test25 (marking on <, good access)",
2835 .insns = {
2836 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2837 offsetof(struct __sk_buff, data)),
2838 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2839 offsetof(struct __sk_buff, data_end)),
2840 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2842 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
2843 BPF_MOV64_IMM(BPF_REG_0, 0),
2844 BPF_EXIT_INSN(),
2845 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2846 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
2847 },
2848 .result = ACCEPT,
2849 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2850 },
2851 {
2852 "direct packet access: test26 (marking on <, bad access)",
2853 .insns = {
2854 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2855 offsetof(struct __sk_buff, data)),
2856 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2857 offsetof(struct __sk_buff, data_end)),
2858 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2859 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2860 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
2861 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2862 BPF_MOV64_IMM(BPF_REG_0, 0),
2863 BPF_EXIT_INSN(),
2864 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
2865 },
2866 .result = REJECT,
2867 .errstr = "invalid access to packet",
2868 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2869 },
2870 {
2871 "direct packet access: test27 (marking on <=, good access)",
2872 .insns = {
2873 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2874 offsetof(struct __sk_buff, data)),
2875 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2876 offsetof(struct __sk_buff, data_end)),
2877 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2878 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2879 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
2880 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2881 BPF_MOV64_IMM(BPF_REG_0, 1),
2882 BPF_EXIT_INSN(),
2883 },
2884 .result = ACCEPT,
2885 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2886 },
2887 {
2888 "direct packet access: test28 (marking on <=, bad access)",
2889 .insns = {
2890 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2891 offsetof(struct __sk_buff, data)),
2892 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2893 offsetof(struct __sk_buff, data_end)),
2894 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2895 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2896 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
2897 BPF_MOV64_IMM(BPF_REG_0, 1),
2898 BPF_EXIT_INSN(),
2899 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2900 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
2901 },
2902 .result = REJECT,
2903 .errstr = "invalid access to packet",
2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 },
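/* helper access to packet: packet pointers passed as helper arguments are
 * only valid after a successful bounds check against data_end, and only
 * for helpers that accept a packet pointer for that argument (csum_diff
 * does, skb_load_bytes/skb_store_bytes do not).  Covered for both XDP and
 * SCHED_CLS programs.
 */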
2906 {
2907 "helper access to packet: test1, valid packet_ptr range",
2908 .insns = {
2909 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2910 offsetof(struct xdp_md, data)),
2911 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2912 offsetof(struct xdp_md, data_end)),
2913 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2915 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
2916 BPF_LD_MAP_FD(BPF_REG_1, 0),
2917 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
2918 BPF_MOV64_IMM(BPF_REG_4, 0),
2919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2920 BPF_FUNC_map_update_elem),
2921 BPF_MOV64_IMM(BPF_REG_0, 0),
2922 BPF_EXIT_INSN(),
2923 },
2924 .fixup_map1 = { 5 },
2925 .result_unpriv = ACCEPT,
2926 .result = ACCEPT,
2927 .prog_type = BPF_PROG_TYPE_XDP,
2928 },
2929 {
2930 "helper access to packet: test2, unchecked packet_ptr",
2931 .insns = {
2932 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2933 offsetof(struct xdp_md, data)),
2934 BPF_LD_MAP_FD(BPF_REG_1, 0),
2935 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2936 BPF_FUNC_map_lookup_elem),
2937 BPF_MOV64_IMM(BPF_REG_0, 0),
2938 BPF_EXIT_INSN(),
2939 },
2940 .fixup_map1 = { 1 },
2941 .result = REJECT,
2942 .errstr = "invalid access to packet",
2943 .prog_type = BPF_PROG_TYPE_XDP,
2944 },
2945 {
2946 "helper access to packet: test3, variable add",
2947 .insns = {
2948 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2949 offsetof(struct xdp_md, data)),
2950 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2951 offsetof(struct xdp_md, data_end)),
2952 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
2954 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
2955 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
2956 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2957 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
2958 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2959 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
2960 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
2961 BPF_LD_MAP_FD(BPF_REG_1, 0),
2962 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
2963 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2964 BPF_FUNC_map_lookup_elem),
2965 BPF_MOV64_IMM(BPF_REG_0, 0),
2966 BPF_EXIT_INSN(),
2967 },
2968 .fixup_map1 = { 11 },
2969 .result = ACCEPT,
2970 .prog_type = BPF_PROG_TYPE_XDP,
2971 },
2972 {
2973 "helper access to packet: test4, packet_ptr with bad range",
2974 .insns = {
2975 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2976 offsetof(struct xdp_md, data)),
2977 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2978 offsetof(struct xdp_md, data_end)),
2979 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
2980 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
2981 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
2982 BPF_MOV64_IMM(BPF_REG_0, 0),
2983 BPF_EXIT_INSN(),
2984 BPF_LD_MAP_FD(BPF_REG_1, 0),
2985 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2986 BPF_FUNC_map_lookup_elem),
2987 BPF_MOV64_IMM(BPF_REG_0, 0),
2988 BPF_EXIT_INSN(),
2989 },
2990 .fixup_map1 = { 7 },
2991 .result = REJECT,
2992 .errstr = "invalid access to packet",
2993 .prog_type = BPF_PROG_TYPE_XDP,
2994 },
2995 {
2996 "helper access to packet: test5, packet_ptr with too short range",
2997 .insns = {
2998 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2999 offsetof(struct xdp_md, data)),
3000 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3001 offsetof(struct xdp_md, data_end)),
3002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3003 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3004 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3005 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3006 BPF_LD_MAP_FD(BPF_REG_1, 0),
3007 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3008 BPF_FUNC_map_lookup_elem),
3009 BPF_MOV64_IMM(BPF_REG_0, 0),
3010 BPF_EXIT_INSN(),
3011 },
3012 .fixup_map1 = { 6 },
3013 .result = REJECT,
3014 .errstr = "invalid access to packet",
3015 .prog_type = BPF_PROG_TYPE_XDP,
3016 },
3017 {
3018 "helper access to packet: test6, cls valid packet_ptr range",
3019 .insns = {
3020 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3021 offsetof(struct __sk_buff, data)),
3022 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3023 offsetof(struct __sk_buff, data_end)),
3024 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3025 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3026 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3027 BPF_LD_MAP_FD(BPF_REG_1, 0),
3028 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3029 BPF_MOV64_IMM(BPF_REG_4, 0),
3030 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3031 BPF_FUNC_map_update_elem),
3032 BPF_MOV64_IMM(BPF_REG_0, 0),
3033 BPF_EXIT_INSN(),
3034 },
3035 .fixup_map1 = { 5 },
3036 .result = ACCEPT,
3037 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3038 },
3039 {
3040 "helper access to packet: test7, cls unchecked packet_ptr",
3041 .insns = {
3042 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3043 offsetof(struct __sk_buff, data)),
3044 BPF_LD_MAP_FD(BPF_REG_1, 0),
3045 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3046 BPF_FUNC_map_lookup_elem),
3047 BPF_MOV64_IMM(BPF_REG_0, 0),
3048 BPF_EXIT_INSN(),
3049 },
3050 .fixup_map1 = { 1 },
3051 .result = REJECT,
3052 .errstr = "invalid access to packet",
3053 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3054 },
3055 {
3056 "helper access to packet: test8, cls variable add",
3057 .insns = {
3058 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3059 offsetof(struct __sk_buff, data)),
3060 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3061 offsetof(struct __sk_buff, data_end)),
3062 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3064 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3065 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3066 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3067 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3068 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3069 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3070 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3071 BPF_LD_MAP_FD(BPF_REG_1, 0),
3072 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3073 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3074 BPF_FUNC_map_lookup_elem),
3075 BPF_MOV64_IMM(BPF_REG_0, 0),
3076 BPF_EXIT_INSN(),
3077 },
3078 .fixup_map1 = { 11 },
3079 .result = ACCEPT,
3080 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3081 },
3082 {
3083 "helper access to packet: test9, cls packet_ptr with bad range",
3084 .insns = {
3085 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3086 offsetof(struct __sk_buff, data)),
3087 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3088 offsetof(struct __sk_buff, data_end)),
3089 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3090 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3091 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3092 BPF_MOV64_IMM(BPF_REG_0, 0),
3093 BPF_EXIT_INSN(),
3094 BPF_LD_MAP_FD(BPF_REG_1, 0),
3095 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3096 BPF_FUNC_map_lookup_elem),
3097 BPF_MOV64_IMM(BPF_REG_0, 0),
3098 BPF_EXIT_INSN(),
3099 },
3100 .fixup_map1 = { 7 },
3101 .result = REJECT,
3102 .errstr = "invalid access to packet",
3103 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3104 },
3105 {
3106 "helper access to packet: test10, cls packet_ptr with too short range",
3107 .insns = {
3108 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3109 offsetof(struct __sk_buff, data)),
3110 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3111 offsetof(struct __sk_buff, data_end)),
3112 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3113 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3115 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3116 BPF_LD_MAP_FD(BPF_REG_1, 0),
3117 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3118 BPF_FUNC_map_lookup_elem),
3119 BPF_MOV64_IMM(BPF_REG_0, 0),
3120 BPF_EXIT_INSN(),
3121 },
3122 .fixup_map1 = { 6 },
3123 .result = REJECT,
3124 .errstr = "invalid access to packet",
3125 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3126 },
3127 {
3128 "helper access to packet: test11, cls unsuitable helper 1",
3129 .insns = {
3130 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3131 offsetof(struct __sk_buff, data)),
3132 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3133 offsetof(struct __sk_buff, data_end)),
3134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3135 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3137 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3138 BPF_MOV64_IMM(BPF_REG_2, 0),
3139 BPF_MOV64_IMM(BPF_REG_4, 42),
3140 BPF_MOV64_IMM(BPF_REG_5, 0),
3141 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3142 BPF_FUNC_skb_store_bytes),
3143 BPF_MOV64_IMM(BPF_REG_0, 0),
3144 BPF_EXIT_INSN(),
3145 },
3146 .result = REJECT,
3147 .errstr = "helper access to the packet",
3148 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3149 },
3150 {
3151 "helper access to packet: test12, cls unsuitable helper 2",
3152 .insns = {
3153 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3154 offsetof(struct __sk_buff, data)),
3155 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3156 offsetof(struct __sk_buff, data_end)),
3157 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3158 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3159 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3160 BPF_MOV64_IMM(BPF_REG_2, 0),
3161 BPF_MOV64_IMM(BPF_REG_4, 4),
3162 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3163 BPF_FUNC_skb_load_bytes),
3164 BPF_MOV64_IMM(BPF_REG_0, 0),
3165 BPF_EXIT_INSN(),
3166 },
3167 .result = REJECT,
3168 .errstr = "helper access to the packet",
3169 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3170 },
3171 {
3172 "helper access to packet: test13, cls helper ok",
3173 .insns = {
3174 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3175 offsetof(struct __sk_buff, data)),
3176 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3177 offsetof(struct __sk_buff, data_end)),
3178 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3179 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3181 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3182 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3183 BPF_MOV64_IMM(BPF_REG_2, 4),
3184 BPF_MOV64_IMM(BPF_REG_3, 0),
3185 BPF_MOV64_IMM(BPF_REG_4, 0),
3186 BPF_MOV64_IMM(BPF_REG_5, 0),
3187 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3188 BPF_FUNC_csum_diff),
3189 BPF_MOV64_IMM(BPF_REG_0, 0),
3190 BPF_EXIT_INSN(),
3191 },
3192 .result = ACCEPT,
3193 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3194 },
3195 {
3196 "helper access to packet: test14, cls helper ok sub",
3197 .insns = {
3198 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3199 offsetof(struct __sk_buff, data)),
3200 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3201 offsetof(struct __sk_buff, data_end)),
3202 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3205 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3206 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3207 BPF_MOV64_IMM(BPF_REG_2, 4),
3208 BPF_MOV64_IMM(BPF_REG_3, 0),
3209 BPF_MOV64_IMM(BPF_REG_4, 0),
3210 BPF_MOV64_IMM(BPF_REG_5, 0),
3211 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3212 BPF_FUNC_csum_diff),
3213 BPF_MOV64_IMM(BPF_REG_0, 0),
3214 BPF_EXIT_INSN(),
3215 },
3216 .result = ACCEPT,
3217 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3218 },
3219 {
3220 "helper access to packet: test15, cls helper fail sub",
3221 .insns = {
3222 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3223 offsetof(struct __sk_buff, data)),
3224 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3225 offsetof(struct __sk_buff, data_end)),
3226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3227 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3229 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3230 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3231 BPF_MOV64_IMM(BPF_REG_2, 4),
3232 BPF_MOV64_IMM(BPF_REG_3, 0),
3233 BPF_MOV64_IMM(BPF_REG_4, 0),
3234 BPF_MOV64_IMM(BPF_REG_5, 0),
3235 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3236 BPF_FUNC_csum_diff),
3237 BPF_MOV64_IMM(BPF_REG_0, 0),
3238 BPF_EXIT_INSN(),
3239 },
3240 .result = REJECT,
3241 .errstr = "invalid access to packet",
3242 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3243 },
3244 {
3245 "helper access to packet: test16, cls helper fail range 1",
3246 .insns = {
3247 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3248 offsetof(struct __sk_buff, data)),
3249 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3250 offsetof(struct __sk_buff, data_end)),
3251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3252 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3253 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3254 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3255 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3256 BPF_MOV64_IMM(BPF_REG_2, 8),
3257 BPF_MOV64_IMM(BPF_REG_3, 0),
3258 BPF_MOV64_IMM(BPF_REG_4, 0),
3259 BPF_MOV64_IMM(BPF_REG_5, 0),
3260 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3261 BPF_FUNC_csum_diff),
3262 BPF_MOV64_IMM(BPF_REG_0, 0),
3263 BPF_EXIT_INSN(),
3264 },
3265 .result = REJECT,
3266 .errstr = "invalid access to packet",
3267 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3268 },
3269 {
3270 "helper access to packet: test17, cls helper fail range 2",
3271 .insns = {
3272 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3273 offsetof(struct __sk_buff, data)),
3274 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3275 offsetof(struct __sk_buff, data_end)),
3276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3278 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3279 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3280 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3281 BPF_MOV64_IMM(BPF_REG_2, -9),
3282 BPF_MOV64_IMM(BPF_REG_3, 0),
3283 BPF_MOV64_IMM(BPF_REG_4, 0),
3284 BPF_MOV64_IMM(BPF_REG_5, 0),
3285 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3286 BPF_FUNC_csum_diff),
3287 BPF_MOV64_IMM(BPF_REG_0, 0),
3288 BPF_EXIT_INSN(),
3289 },
3290 .result = REJECT,
3291 .errstr = "R2 min value is negative",
3292 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3293 },
3294 {
3295 "helper access to packet: test18, cls helper fail range 3",
3296 .insns = {
3297 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3298 offsetof(struct __sk_buff, data)),
3299 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3300 offsetof(struct __sk_buff, data_end)),
3301 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3302 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3303 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3304 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3306 BPF_MOV64_IMM(BPF_REG_2, ~0),
3307 BPF_MOV64_IMM(BPF_REG_3, 0),
3308 BPF_MOV64_IMM(BPF_REG_4, 0),
3309 BPF_MOV64_IMM(BPF_REG_5, 0),
3310 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3311 BPF_FUNC_csum_diff),
3312 BPF_MOV64_IMM(BPF_REG_0, 0),
3313 BPF_EXIT_INSN(),
3314 },
3315 .result = REJECT,
3316 .errstr = "R2 min value is negative",
3317 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3318 },
3319 {
3320 "helper access to packet: test19, cls helper fail range zero",
3321 .insns = {
3322 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3323 offsetof(struct __sk_buff, data)),
3324 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3325 offsetof(struct __sk_buff, data_end)),
3326 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3327 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3329 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3330 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3331 BPF_MOV64_IMM(BPF_REG_2, 0),
3332 BPF_MOV64_IMM(BPF_REG_3, 0),
3333 BPF_MOV64_IMM(BPF_REG_4, 0),
3334 BPF_MOV64_IMM(BPF_REG_5, 0),
3335 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3336 BPF_FUNC_csum_diff),
3337 BPF_MOV64_IMM(BPF_REG_0, 0),
3338 BPF_EXIT_INSN(),
3339 },
3340 .result = REJECT,
3341 .errstr = "invalid access to packet",
3342 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3343 },
3344 {
3345 "helper access to packet: test20, pkt end as input",
3346 .insns = {
3347 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3348 offsetof(struct __sk_buff, data)),
3349 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3350 offsetof(struct __sk_buff, data_end)),
3351 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3352 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3353 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3354 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3355 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3356 BPF_MOV64_IMM(BPF_REG_2, 4),
3357 BPF_MOV64_IMM(BPF_REG_3, 0),
3358 BPF_MOV64_IMM(BPF_REG_4, 0),
3359 BPF_MOV64_IMM(BPF_REG_5, 0),
3360 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3361 BPF_FUNC_csum_diff),
3362 BPF_MOV64_IMM(BPF_REG_0, 0),
3363 BPF_EXIT_INSN(),
3364 },
3365 .result = REJECT,
3366 .errstr = "R1 type=pkt_end expected=fp",
3367 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3368 },
3369 {
3370 "helper access to packet: test21, wrong reg",
3371 .insns = {
3372 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3373 offsetof(struct __sk_buff, data)),
3374 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3375 offsetof(struct __sk_buff, data_end)),
3376 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3379 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3380 BPF_MOV64_IMM(BPF_REG_2, 4),
3381 BPF_MOV64_IMM(BPF_REG_3, 0),
3382 BPF_MOV64_IMM(BPF_REG_4, 0),
3383 BPF_MOV64_IMM(BPF_REG_5, 0),
3384 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3385 BPF_FUNC_csum_diff),
3386 BPF_MOV64_IMM(BPF_REG_0, 0),
3387 BPF_EXIT_INSN(),
3388 },
3389 .result = REJECT,
3390 .errstr = "invalid access to packet",
3391 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3392 },
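/* Map array access: the value returned by map_lookup_elem points to a
 * struct test_val of 48 bytes (see the expected "value_size=48" errors
 * below).  An index taken from the map or from a register must be bounds
 * checked on both ends before it is scaled and added to the value pointer.
 */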
3393 {
3394 "valid map access into an array with a constant",
3395 .insns = {
3396 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3397 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3398 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3399 BPF_LD_MAP_FD(BPF_REG_1, 0),
3400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3401 BPF_FUNC_map_lookup_elem),
3402 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3403 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3404 offsetof(struct test_val, foo)),
3405 BPF_EXIT_INSN(),
3406 },
3407 .fixup_map2 = { 3 },
3408 .errstr_unpriv = "R0 leaks addr",
3409 .result_unpriv = REJECT,
3410 .result = ACCEPT,
3411 },
3412 {
3413 "valid map access into an array with a register",
3414 .insns = {
3415 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3416 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3418 BPF_LD_MAP_FD(BPF_REG_1, 0),
3419 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3420 BPF_FUNC_map_lookup_elem),
3421 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3422 BPF_MOV64_IMM(BPF_REG_1, 4),
3423 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3424 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3425 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3426 offsetof(struct test_val, foo)),
3427 BPF_EXIT_INSN(),
3428 },
3429 .fixup_map2 = { 3 },
3430 .errstr_unpriv = "R0 leaks addr",
3431 .result_unpriv = REJECT,
3432 .result = ACCEPT,
3433 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3434 },
3435 {
3436 "valid map access into an array with a variable",
3437 .insns = {
3438 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3439 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3441 BPF_LD_MAP_FD(BPF_REG_1, 0),
3442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3443 BPF_FUNC_map_lookup_elem),
3444 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3445 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3446 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3447 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3448 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3449 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3450 offsetof(struct test_val, foo)),
3451 BPF_EXIT_INSN(),
3452 },
3453 .fixup_map2 = { 3 },
3454 .errstr_unpriv = "R0 leaks addr",
3455 .result_unpriv = REJECT,
3456 .result = ACCEPT,
3457 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3458 },
3459 {
3460 "valid map access into an array with a signed variable",
3461 .insns = {
3462 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3463 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3464 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3465 BPF_LD_MAP_FD(BPF_REG_1, 0),
3466 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3467 BPF_FUNC_map_lookup_elem),
3468 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3469 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3470 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3471 BPF_MOV32_IMM(BPF_REG_1, 0),
3472 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3473 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3474 BPF_MOV32_IMM(BPF_REG_1, 0),
3475 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3476 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3477 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3478 offsetof(struct test_val, foo)),
3479 BPF_EXIT_INSN(),
3480 },
3481 .fixup_map2 = { 3 },
3482 .errstr_unpriv = "R0 leaks addr",
3483 .result_unpriv = REJECT,
3484 .result = ACCEPT,
3485 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3486 },
3487 {
3488 "invalid map access into an array with a constant",
3489 .insns = {
3490 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3491 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3492 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3493 BPF_LD_MAP_FD(BPF_REG_1, 0),
3494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3495 BPF_FUNC_map_lookup_elem),
3496 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3497 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3498 offsetof(struct test_val, foo)),
3499 BPF_EXIT_INSN(),
3500 },
3501 .fixup_map2 = { 3 },
3502 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3503 .result = REJECT,
3504 },
3505 {
3506 "invalid map access into an array with a register",
3507 .insns = {
3508 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3509 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3511 BPF_LD_MAP_FD(BPF_REG_1, 0),
3512 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3513 BPF_FUNC_map_lookup_elem),
3514 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3515 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3516 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3517 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3518 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3519 offsetof(struct test_val, foo)),
3520 BPF_EXIT_INSN(),
3521 },
3522 .fixup_map2 = { 3 },
3523 .errstr = "R0 min value is outside of the array range",
3524 .result = REJECT,
3525 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3526 },
3527 {
3528 "invalid map access into an array with a variable",
3529 .insns = {
3530 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3531 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3532 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3533 BPF_LD_MAP_FD(BPF_REG_1, 0),
3534 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3535 BPF_FUNC_map_lookup_elem),
3536 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3537 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3538 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3539 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3540 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3541 offsetof(struct test_val, foo)),
3542 BPF_EXIT_INSN(),
3543 },
3544 .fixup_map2 = { 3 },
3545 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3546 .result = REJECT,
3547 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3548 },
3549 {
3550 "invalid map access into an array with no floor check",
3551 .insns = {
3552 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3553 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3554 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3555 BPF_LD_MAP_FD(BPF_REG_1, 0),
3556 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3557 BPF_FUNC_map_lookup_elem),
3558 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3559 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3560 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3561 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3562 BPF_MOV32_IMM(BPF_REG_1, 0),
3563 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3564 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3565 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3566 offsetof(struct test_val, foo)),
3567 BPF_EXIT_INSN(),
3568 },
3569 .fixup_map2 = { 3 },
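	/* Only an upper bound is checked, and the index is loaded as a full
	 * 64-bit word, so its signed minimum is still unbounded below.
	 */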
3570 .errstr_unpriv = "R0 leaks addr",
3571 .errstr = "R0 unbounded memory access",
3572 .result_unpriv = REJECT,
3573 .result = REJECT,
3574 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3575 },
3576 {
3577 "invalid map access into an array with a invalid max check",
3578 .insns = {
3579 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3580 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3581 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3582 BPF_LD_MAP_FD(BPF_REG_1, 0),
3583 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3584 BPF_FUNC_map_lookup_elem),
3585 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3586 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3587 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3588 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3589 BPF_MOV32_IMM(BPF_REG_1, 0),
3590 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3591 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3592 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3593 offsetof(struct test_val, foo)),
3594 BPF_EXIT_INSN(),
3595 },
3596 .fixup_map2 = { 3 },
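	/* Off-by-one bound: comparing against MAX_ENTRIES + 1 still admits
	 * index MAX_ENTRIES, i.e. offset 44, and the 8-byte store then runs
	 * past the 48-byte value.
	 */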
3597 .errstr_unpriv = "R0 leaks addr",
3598 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3599 .result_unpriv = REJECT,
3600 .result = REJECT,
3601 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3602 },
3603 {
3604 "invalid map access into an array with a invalid max check",
3605 .insns = {
3606 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3607 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3608 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3609 BPF_LD_MAP_FD(BPF_REG_1, 0),
3610 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3611 BPF_FUNC_map_lookup_elem),
3612 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3613 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3614 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3615 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3616 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3617 BPF_LD_MAP_FD(BPF_REG_1, 0),
3618 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3619 BPF_FUNC_map_lookup_elem),
3620 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3621 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3622 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3623 offsetof(struct test_val, foo)),
3624 BPF_EXIT_INSN(),
3625 },
3626 .fixup_map2 = { 3, 11 },
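	/* Despite the reused description, this variant adds two map value
	 * pointers together; pointer += pointer leaves R0 as an unknown
	 * scalar, so the following load is rejected.
	 */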
3627 .errstr_unpriv = "R0 pointer += pointer",
3628 .errstr = "R0 invalid mem access 'inv'",
3629 .result_unpriv = REJECT,
3630 .result = REJECT,
3631 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3632 },
3633 {
3634 "multiple registers share map_lookup_elem result",
3635 .insns = {
3636 BPF_MOV64_IMM(BPF_REG_1, 10),
3637 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3640 BPF_LD_MAP_FD(BPF_REG_1, 0),
3641 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3642 BPF_FUNC_map_lookup_elem),
3643 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3644 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3645 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3646 BPF_EXIT_INSN(),
3647 },
3648 .fixup_map1 = { 4 },
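	/* R4 is copied from R0 before the NULL check; the check on R0 is
	 * expected to mark the copy as a valid map value as well.
	 */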
3649 .result = ACCEPT,
3650 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3651 },
3652 {
3653 "alu ops on ptr_to_map_value_or_null, 1",
3654 .insns = {
3655 BPF_MOV64_IMM(BPF_REG_1, 10),
3656 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3657 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3658 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3659 BPF_LD_MAP_FD(BPF_REG_1, 0),
3660 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3661 BPF_FUNC_map_lookup_elem),
3662 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3666 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3667 BPF_EXIT_INSN(),
3668 },
3669 .fixup_map1 = { 4 },
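	/* R4 is modified (even though -2 + 2 nets to zero) while it is still
	 * PTR_TO_MAP_VALUE_OR_NULL, so the later NULL check on R0 no longer
	 * covers it.
	 */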
3670 .errstr = "R4 invalid mem access",
3671 .result = REJECT,
3672 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3673 },
3674 {
3675 "alu ops on ptr_to_map_value_or_null, 2",
3676 .insns = {
3677 BPF_MOV64_IMM(BPF_REG_1, 10),
3678 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3679 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3680 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3681 BPF_LD_MAP_FD(BPF_REG_1, 0),
3682 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3683 BPF_FUNC_map_lookup_elem),
3684 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3685 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3687 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3688 BPF_EXIT_INSN(),
3689 },
3690 .fixup_map1 = { 4 },
3691 .errstr = "R4 invalid mem access",
3692 .result = REJECT,
3693 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3694 },
3695 {
3696 "alu ops on ptr_to_map_value_or_null, 3",
3697 .insns = {
3698 BPF_MOV64_IMM(BPF_REG_1, 10),
3699 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3700 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3701 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3702 BPF_LD_MAP_FD(BPF_REG_1, 0),
3703 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3704 BPF_FUNC_map_lookup_elem),
3705 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3706 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3708 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3709 BPF_EXIT_INSN(),
3710 },
3711 .fixup_map1 = { 4 },
3712 .errstr = "R4 invalid mem access",
3713 .result = REJECT,
3714 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3715 },
3716 {
3717 "invalid memory access with multiple map_lookup_elem calls",
3718 .insns = {
3719 BPF_MOV64_IMM(BPF_REG_1, 10),
3720 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3721 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3722 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3723 BPF_LD_MAP_FD(BPF_REG_1, 0),
3724 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3725 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3726 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3727 BPF_FUNC_map_lookup_elem),
3728 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3729 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3730 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3731 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3732 BPF_FUNC_map_lookup_elem),
3733 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3734 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3735 BPF_EXIT_INSN(),
3736 },
3737 .fixup_map1 = { 4 },
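	/* The second helper call clobbers the caller-saved registers, so R4,
	 * which held the first lookup result, is no longer readable.
	 */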
3738 .result = REJECT,
3739 .errstr = "R4 !read_ok",
3740 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3741 },
3742 {
3743 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3744 .insns = {
3745 BPF_MOV64_IMM(BPF_REG_1, 10),
3746 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3747 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3748 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3749 BPF_LD_MAP_FD(BPF_REG_1, 0),
3750 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3751 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3752 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3753 BPF_FUNC_map_lookup_elem),
3754 BPF_MOV64_IMM(BPF_REG_2, 10),
3755 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
3756 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3758 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3759 BPF_FUNC_map_lookup_elem),
3760 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3761 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3762 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3763 BPF_EXIT_INSN(),
3764 },
3765 .fixup_map1 = { 4 },
3766 .result = ACCEPT,
3767 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3768 },
3769 {
3770 "invalid map access from else condition",
3771 .insns = {
3772 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3773 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3774 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3775 BPF_LD_MAP_FD(BPF_REG_1, 0),
3776 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
3777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
3778 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3779 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
3780 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
3781 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3782 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3783 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
3784 BPF_EXIT_INSN(),
3785 },
3786 .fixup_map2 = { 3 },
3787 .errstr = "R0 unbounded memory access",
3788 .result = REJECT,
3789 .errstr_unpriv = "R0 leaks addr",
3790 .result_unpriv = REJECT,
3791 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3792 },
3793 {
3794 "constant register |= constant should keep constant type",
3795 .insns = {
3796 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3797 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3798 BPF_MOV64_IMM(BPF_REG_2, 34),
3799 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
3800 BPF_MOV64_IMM(BPF_REG_3, 0),
3801 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3802 BPF_EXIT_INSN(),
3803 },
3804 .result = ACCEPT,
3805 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3806 },
3807 {
3808 "constant register |= constant should not bypass stack boundary checks",
3809 .insns = {
3810 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3811 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3812 BPF_MOV64_IMM(BPF_REG_2, 34),
3813 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
3814 BPF_MOV64_IMM(BPF_REG_3, 0),
3815 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3816 BPF_EXIT_INSN(),
3817 },
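	/* 34 | 24 = 58, so the constant length exceeds the 48 bytes at
	 * fp - 48 (the accepted variant above uses 34 | 13 = 47, which fits).
	 */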
3818 .errstr = "invalid stack type R1 off=-48 access_size=58",
3819 .result = REJECT,
3820 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3821 },
3822 {
3823 "constant register |= constant register should keep constant type",
3824 .insns = {
3825 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3826 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3827 BPF_MOV64_IMM(BPF_REG_2, 34),
3828 BPF_MOV64_IMM(BPF_REG_4, 13),
3829 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3830 BPF_MOV64_IMM(BPF_REG_3, 0),
3831 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3832 BPF_EXIT_INSN(),
3833 },
3834 .result = ACCEPT,
3835 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3836 },
3837 {
3838 "constant register |= constant register should not bypass stack boundary checks",
3839 .insns = {
3840 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
3841 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
3842 BPF_MOV64_IMM(BPF_REG_2, 34),
3843 BPF_MOV64_IMM(BPF_REG_4, 24),
3844 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
3845 BPF_MOV64_IMM(BPF_REG_3, 0),
3846 BPF_EMIT_CALL(BPF_FUNC_probe_read),
3847 BPF_EXIT_INSN(),
3848 },
3849 .errstr = "invalid stack type R1 off=-48 access_size=58",
3850 .result = REJECT,
3851 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
3852 },
3853 {
3854 "invalid direct packet write for LWT_IN",
3855 .insns = {
3856 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3857 offsetof(struct __sk_buff, data)),
3858 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3859 offsetof(struct __sk_buff, data_end)),
3860 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3861 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3862 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3863 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3864 BPF_MOV64_IMM(BPF_REG_0, 0),
3865 BPF_EXIT_INSN(),
3866 },
3867 .errstr = "cannot write into packet",
3868 .result = REJECT,
3869 .prog_type = BPF_PROG_TYPE_LWT_IN,
3870 },
3871 {
3872 "invalid direct packet write for LWT_OUT",
3873 .insns = {
3874 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3875 offsetof(struct __sk_buff, data)),
3876 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3877 offsetof(struct __sk_buff, data_end)),
3878 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3879 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3880 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3881 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3882 BPF_MOV64_IMM(BPF_REG_0, 0),
3883 BPF_EXIT_INSN(),
3884 },
3885 .errstr = "cannot write into packet",
3886 .result = REJECT,
3887 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3888 },
3889 {
3890 "direct packet write for LWT_XMIT",
3891 .insns = {
3892 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3893 offsetof(struct __sk_buff, data)),
3894 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3895 offsetof(struct __sk_buff, data_end)),
3896 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3898 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3899 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
3900 BPF_MOV64_IMM(BPF_REG_0, 0),
3901 BPF_EXIT_INSN(),
3902 },
3903 .result = ACCEPT,
3904 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3905 },
3906 {
3907 "direct packet read for LWT_IN",
3908 .insns = {
3909 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3910 offsetof(struct __sk_buff, data)),
3911 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3912 offsetof(struct __sk_buff, data_end)),
3913 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3915 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3916 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3917 BPF_MOV64_IMM(BPF_REG_0, 0),
3918 BPF_EXIT_INSN(),
3919 },
3920 .result = ACCEPT,
3921 .prog_type = BPF_PROG_TYPE_LWT_IN,
3922 },
3923 {
3924 "direct packet read for LWT_OUT",
3925 .insns = {
3926 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3927 offsetof(struct __sk_buff, data)),
3928 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3929 offsetof(struct __sk_buff, data_end)),
3930 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3932 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3933 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3934 BPF_MOV64_IMM(BPF_REG_0, 0),
3935 BPF_EXIT_INSN(),
3936 },
3937 .result = ACCEPT,
3938 .prog_type = BPF_PROG_TYPE_LWT_OUT,
3939 },
3940 {
3941 "direct packet read for LWT_XMIT",
3942 .insns = {
3943 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3944 offsetof(struct __sk_buff, data)),
3945 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3946 offsetof(struct __sk_buff, data_end)),
3947 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3948 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3949 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3950 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3951 BPF_MOV64_IMM(BPF_REG_0, 0),
3952 BPF_EXIT_INSN(),
3953 },
3954 .result = ACCEPT,
3955 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3956 },
3957 {
3958 "overlapping checks for direct packet access",
3959 .insns = {
3960 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3961 offsetof(struct __sk_buff, data)),
3962 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3963 offsetof(struct __sk_buff, data_end)),
3964 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3965 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3966 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
3967 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3968 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
3969 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
3970 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
3971 BPF_MOV64_IMM(BPF_REG_0, 0),
3972 BPF_EXIT_INSN(),
3973 },
3974 .result = ACCEPT,
3975 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
3976 },
3977 {
3978 "invalid access of tc_classid for LWT_IN",
3979 .insns = {
3980 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3981 offsetof(struct __sk_buff, tc_classid)),
3982 BPF_EXIT_INSN(),
3983 },
3984 .result = REJECT,
3985 .errstr = "invalid bpf_context access",
3986 },
3987 {
3988 "invalid access of tc_classid for LWT_OUT",
3989 .insns = {
3990 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
3991 offsetof(struct __sk_buff, tc_classid)),
3992 BPF_EXIT_INSN(),
3993 },
3994 .result = REJECT,
3995 .errstr = "invalid bpf_context access",
3996 },
3997 {
3998 "invalid access of tc_classid for LWT_XMIT",
3999 .insns = {
4000 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4001 offsetof(struct __sk_buff, tc_classid)),
4002 BPF_EXIT_INSN(),
4003 },
4004 .result = REJECT,
4005 .errstr = "invalid bpf_context access",
4006 },
4007 {
4008 "leak pointer into ctx 1",
4009 .insns = {
4010 BPF_MOV64_IMM(BPF_REG_0, 0),
4011 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4012 offsetof(struct __sk_buff, cb[0])),
4013 BPF_LD_MAP_FD(BPF_REG_2, 0),
4014 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4015 offsetof(struct __sk_buff, cb[0])),
4016 BPF_EXIT_INSN(),
4017 },
4018 .fixup_map1 = { 2 },
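	/* XADDing a map pointer into skb->cb[] is fine for privileged
	 * programs but counts as an address leak for unprivileged ones.
	 */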
4019 .errstr_unpriv = "R2 leaks addr into mem",
4020 .result_unpriv = REJECT,
4021 .result = ACCEPT,
4022 },
4023 {
4024 "leak pointer into ctx 2",
4025 .insns = {
4026 BPF_MOV64_IMM(BPF_REG_0, 0),
4027 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4028 offsetof(struct __sk_buff, cb[0])),
4029 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4030 offsetof(struct __sk_buff, cb[0])),
4031 BPF_EXIT_INSN(),
4032 },
4033 .errstr_unpriv = "R10 leaks addr into mem",
4034 .result_unpriv = REJECT,
4035 .result = ACCEPT,
4036 },
4037 {
4038 "leak pointer into ctx 3",
4039 .insns = {
4040 BPF_MOV64_IMM(BPF_REG_0, 0),
4041 BPF_LD_MAP_FD(BPF_REG_2, 0),
4042 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4043 offsetof(struct __sk_buff, cb[0])),
4044 BPF_EXIT_INSN(),
4045 },
4046 .fixup_map1 = { 1 },
4047 .errstr_unpriv = "R2 leaks addr into ctx",
4048 .result_unpriv = REJECT,
4049 .result = ACCEPT,
4050 },
4051 {
4052 "leak pointer into map val",
4053 .insns = {
4054 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4055 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4056 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4058 BPF_LD_MAP_FD(BPF_REG_1, 0),
4059 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4060 BPF_FUNC_map_lookup_elem),
4061 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4062 BPF_MOV64_IMM(BPF_REG_3, 0),
4063 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4064 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4065 BPF_MOV64_IMM(BPF_REG_0, 0),
4066 BPF_EXIT_INSN(),
4067 },
4068 .fixup_map1 = { 4 },
4069 .errstr_unpriv = "R6 leaks addr into mem",
4070 .result_unpriv = REJECT,
4071 .result = ACCEPT,
4072 },
4073 {
4074 "helper access to map: full range",
4075 .insns = {
4076 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4077 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4078 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4079 BPF_LD_MAP_FD(BPF_REG_1, 0),
4080 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4081 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4082 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4083 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4084 BPF_MOV64_IMM(BPF_REG_3, 0),
4085 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4086 BPF_EXIT_INSN(),
4087 },
4088 .fixup_map2 = { 3 },
4089 .result = ACCEPT,
4090 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4091 },
4092 {
4093 "helper access to map: partial range",
4094 .insns = {
4095 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4097 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4098 BPF_LD_MAP_FD(BPF_REG_1, 0),
4099 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4100 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4101 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4102 BPF_MOV64_IMM(BPF_REG_2, 8),
4103 BPF_MOV64_IMM(BPF_REG_3, 0),
4104 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4105 BPF_EXIT_INSN(),
4106 },
4107 .fixup_map2 = { 3 },
4108 .result = ACCEPT,
4109 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4110 },
4111 {
4112 "helper access to map: empty range",
4113 .insns = {
4114 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4116 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4117 BPF_LD_MAP_FD(BPF_REG_1, 0),
4118 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4119 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4120 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4121 BPF_MOV64_IMM(BPF_REG_2, 0),
4122 BPF_MOV64_IMM(BPF_REG_3, 0),
4123 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4124 BPF_EXIT_INSN(),
4125 },
4126 .fixup_map2 = { 3 },
4127 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4128 .result = REJECT,
4129 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4130 },
4131 {
4132 "helper access to map: out-of-bound range",
4133 .insns = {
4134 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4135 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4136 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4137 BPF_LD_MAP_FD(BPF_REG_1, 0),
4138 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4139 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4140 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4141 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4142 BPF_MOV64_IMM(BPF_REG_3, 0),
4143 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4144 BPF_EXIT_INSN(),
4145 },
4146 .fixup_map2 = { 3 },
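	/* sizeof(struct test_val) + 8 = 56 bytes requested from a 48-byte
	 * value.
	 */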
4147 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4148 .result = REJECT,
4149 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4150 },
4151 {
4152 "helper access to map: negative range",
4153 .insns = {
4154 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4155 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4156 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4157 BPF_LD_MAP_FD(BPF_REG_1, 0),
4158 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4159 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4160 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4161 BPF_MOV64_IMM(BPF_REG_2, -8),
4162 BPF_MOV64_IMM(BPF_REG_3, 0),
4163 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4164 BPF_EXIT_INSN(),
4165 },
4166 .fixup_map2 = { 3 },
4167 .errstr = "R2 min value is negative",
4168 .result = REJECT,
4169 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4170 },
4171 {
4172 "helper access to adjusted map (via const imm): full range",
4173 .insns = {
4174 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4176 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4177 BPF_LD_MAP_FD(BPF_REG_1, 0),
4178 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4179 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4180 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4181 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4182 offsetof(struct test_val, foo)),
4183 BPF_MOV64_IMM(BPF_REG_2,
4184 sizeof(struct test_val) -
4185 offsetof(struct test_val, foo)),
4186 BPF_MOV64_IMM(BPF_REG_3, 0),
4187 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4188 BPF_EXIT_INSN(),
4189 },
4190 .fixup_map2 = { 3 },
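	/* R1 was advanced by offsetof(struct test_val, foo) = 4, so 44 bytes
	 * is the largest length that still fits the 48-byte value.
	 */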
4191 .result = ACCEPT,
4192 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4193 },
4194 {
4195 "helper access to adjusted map (via const imm): partial range",
4196 .insns = {
4197 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4199 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4200 BPF_LD_MAP_FD(BPF_REG_1, 0),
4201 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4203 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4204 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4205 offsetof(struct test_val, foo)),
4206 BPF_MOV64_IMM(BPF_REG_2, 8),
4207 BPF_MOV64_IMM(BPF_REG_3, 0),
4208 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4209 BPF_EXIT_INSN(),
4210 },
4211 .fixup_map2 = { 3 },
4212 .result = ACCEPT,
4213 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4214 },
4215 {
4216 "helper access to adjusted map (via const imm): empty range",
4217 .insns = {
4218 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4219 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4220 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4221 BPF_LD_MAP_FD(BPF_REG_1, 0),
4222 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4223 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4224 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4225 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4226 offsetof(struct test_val, foo)),
4227 BPF_MOV64_IMM(BPF_REG_2, 0),
4228 BPF_MOV64_IMM(BPF_REG_3, 0),
4229 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4230 BPF_EXIT_INSN(),
4231 },
4232 .fixup_map2 = { 3 },
4233 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
4234 .result = REJECT,
4235 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4236 },
4237 {
4238 "helper access to adjusted map (via const imm): out-of-bound range",
4239 .insns = {
4240 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4241 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4242 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4243 BPF_LD_MAP_FD(BPF_REG_1, 0),
4244 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4245 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4246 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4248 offsetof(struct test_val, foo)),
4249 BPF_MOV64_IMM(BPF_REG_2,
4250 sizeof(struct test_val) -
4251 offsetof(struct test_val, foo) + 8),
4252 BPF_MOV64_IMM(BPF_REG_3, 0),
4253 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4254 BPF_EXIT_INSN(),
4255 },
4256 .fixup_map2 = { 3 },
4257 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4258 .result = REJECT,
4259 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4260 },
4261 {
4262 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4263 .insns = {
4264 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4265 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4266 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4267 BPF_LD_MAP_FD(BPF_REG_1, 0),
4268 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4269 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4270 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4271 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4272 offsetof(struct test_val, foo)),
4273 BPF_MOV64_IMM(BPF_REG_2, -8),
4274 BPF_MOV64_IMM(BPF_REG_3, 0),
4275 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4276 BPF_EXIT_INSN(),
4277 },
4278 .fixup_map2 = { 3 },
4279 .errstr = "R2 min value is negative",
4280 .result = REJECT,
4281 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4282 },
4283 {
4284 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4285 .insns = {
4286 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4287 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4288 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4289 BPF_LD_MAP_FD(BPF_REG_1, 0),
4290 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4291 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4294 offsetof(struct test_val, foo)),
4295 BPF_MOV64_IMM(BPF_REG_2, -1),
4296 BPF_MOV64_IMM(BPF_REG_3, 0),
4297 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4298 BPF_EXIT_INSN(),
4299 },
4300 .fixup_map2 = { 3 },
4301 .errstr = "R2 min value is negative",
4302 .result = REJECT,
4303 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4304 },
4305 {
4306 "helper access to adjusted map (via const reg): full range",
4307 .insns = {
4308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4309 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4310 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4311 BPF_LD_MAP_FD(BPF_REG_1, 0),
4312 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4313 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4314 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4315 BPF_MOV64_IMM(BPF_REG_3,
4316 offsetof(struct test_val, foo)),
4317 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4318 BPF_MOV64_IMM(BPF_REG_2,
4319 sizeof(struct test_val) -
4320 offsetof(struct test_val, foo)),
4321 BPF_MOV64_IMM(BPF_REG_3, 0),
4322 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4323 BPF_EXIT_INSN(),
4324 },
4325 .fixup_map2 = { 3 },
4326 .result = ACCEPT,
4327 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4328 },
4329 {
4330 "helper access to adjusted map (via const reg): partial range",
4331 .insns = {
4332 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4333 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4334 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4335 BPF_LD_MAP_FD(BPF_REG_1, 0),
4336 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4337 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4338 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4339 BPF_MOV64_IMM(BPF_REG_3,
4340 offsetof(struct test_val, foo)),
4341 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4342 BPF_MOV64_IMM(BPF_REG_2, 8),
4343 BPF_MOV64_IMM(BPF_REG_3, 0),
4344 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4345 BPF_EXIT_INSN(),
4346 },
4347 .fixup_map2 = { 3 },
4348 .result = ACCEPT,
4349 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4350 },
4351 {
4352 "helper access to adjusted map (via const reg): empty range",
4353 .insns = {
4354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4356 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4357 BPF_LD_MAP_FD(BPF_REG_1, 0),
4358 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4359 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4360 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4361 BPF_MOV64_IMM(BPF_REG_3, 0),
4362 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4363 BPF_MOV64_IMM(BPF_REG_2, 0),
4364 BPF_MOV64_IMM(BPF_REG_3, 0),
4365 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4366 BPF_EXIT_INSN(),
4367 },
4368 .fixup_map2 = { 3 },
4369 .errstr = "R1 min value is outside of the array range",
4370 .result = REJECT,
4371 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4372 },
4373 {
4374 "helper access to adjusted map (via const reg): out-of-bound range",
4375 .insns = {
4376 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4377 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4378 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4379 BPF_LD_MAP_FD(BPF_REG_1, 0),
4380 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4381 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4382 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4383 BPF_MOV64_IMM(BPF_REG_3,
4384 offsetof(struct test_val, foo)),
4385 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4386 BPF_MOV64_IMM(BPF_REG_2,
4387 sizeof(struct test_val) -
4388 offsetof(struct test_val, foo) + 8),
4389 BPF_MOV64_IMM(BPF_REG_3, 0),
4390 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4391 BPF_EXIT_INSN(),
4392 },
4393 .fixup_map2 = { 3 },
4394 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4395 .result = REJECT,
4396 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4397 },
4398 {
4399 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4400 .insns = {
4401 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4402 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4403 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4404 BPF_LD_MAP_FD(BPF_REG_1, 0),
4405 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4406 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4407 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4408 BPF_MOV64_IMM(BPF_REG_3,
4409 offsetof(struct test_val, foo)),
4410 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4411 BPF_MOV64_IMM(BPF_REG_2, -8),
4412 BPF_MOV64_IMM(BPF_REG_3, 0),
4413 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4414 BPF_EXIT_INSN(),
4415 },
4416 .fixup_map2 = { 3 },
4417 .errstr = "R2 min value is negative",
4418 .result = REJECT,
4419 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4420 },
4421 {
4422 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4423 .insns = {
4424 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4425 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4426 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4427 BPF_LD_MAP_FD(BPF_REG_1, 0),
4428 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4429 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4430 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4431 BPF_MOV64_IMM(BPF_REG_3,
4432 offsetof(struct test_val, foo)),
4433 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4434 BPF_MOV64_IMM(BPF_REG_2, -1),
4435 BPF_MOV64_IMM(BPF_REG_3, 0),
4436 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4437 BPF_EXIT_INSN(),
4438 },
4439 .fixup_map2 = { 3 },
4440 .errstr = "R2 min value is negative",
4441 .result = REJECT,
4442 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4443 },
4444 {
4445 "helper access to adjusted map (via variable): full range",
4446 .insns = {
4447 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4448 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4449 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4450 BPF_LD_MAP_FD(BPF_REG_1, 0),
4451 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4452 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4453 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4454 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4455 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4456 offsetof(struct test_val, foo), 4),
4457 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4458 BPF_MOV64_IMM(BPF_REG_2,
4459 sizeof(struct test_val) -
4460 offsetof(struct test_val, foo)),
4461 BPF_MOV64_IMM(BPF_REG_3, 0),
4462 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4463 BPF_EXIT_INSN(),
4464 },
4465 .fixup_map2 = { 3 },
4466 .result = ACCEPT,
4467 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4468 },
4469 {
4470 "helper access to adjusted map (via variable): partial range",
4471 .insns = {
4472 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4473 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4474 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4475 BPF_LD_MAP_FD(BPF_REG_1, 0),
4476 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4477 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4478 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4479 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4480 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4481 offsetof(struct test_val, foo), 4),
4482 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4483 BPF_MOV64_IMM(BPF_REG_2, 8),
4484 BPF_MOV64_IMM(BPF_REG_3, 0),
4485 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4486 BPF_EXIT_INSN(),
4487 },
4488 .fixup_map2 = { 3 },
4489 .result = ACCEPT,
4490 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4491 },
4492 {
4493 "helper access to adjusted map (via variable): empty range",
4494 .insns = {
4495 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4497 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4498 BPF_LD_MAP_FD(BPF_REG_1, 0),
4499 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4500 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4501 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4502 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4503 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4504 offsetof(struct test_val, foo), 4),
4505 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4506 BPF_MOV64_IMM(BPF_REG_2, 0),
4507 BPF_MOV64_IMM(BPF_REG_3, 0),
4508 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4509 BPF_EXIT_INSN(),
4510 },
4511 .fixup_map2 = { 3 },
4512 .errstr = "R1 min value is outside of the array range",
4513 .result = REJECT,
4514 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4515 },
4516 {
4517 "helper access to adjusted map (via variable): no max check",
4518 .insns = {
4519 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4520 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4521 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4522 BPF_LD_MAP_FD(BPF_REG_1, 0),
4523 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4524 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4525 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4526 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4527 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4528 BPF_MOV64_IMM(BPF_REG_2, 1),
4529 BPF_MOV64_IMM(BPF_REG_3, 0),
4530 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4531 BPF_EXIT_INSN(),
4532 },
4533 .fixup_map2 = { 3 },
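	/* The offset added to R1 is read from map memory and never checked
	 * against an upper bound, so the start of the helper's access window
	 * is treated as unbounded.
	 */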
4534 .errstr = "R1 unbounded memory access",
4535 .result = REJECT,
4536 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4537 },
4538 {
4539 "helper access to adjusted map (via variable): wrong max check",
4540 .insns = {
4541 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4542 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4543 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4544 BPF_LD_MAP_FD(BPF_REG_1, 0),
4545 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4546 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4547 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4548 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4549 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4550 offsetof(struct test_val, foo), 4),
4551 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4552 BPF_MOV64_IMM(BPF_REG_2,
4553 sizeof(struct test_val) -
4554 offsetof(struct test_val, foo) + 1),
4555 BPF_MOV64_IMM(BPF_REG_3, 0),
4556 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4557 BPF_EXIT_INSN(),
4558 },
4559 .fixup_map2 = { 3 },
4560 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4561 .result = REJECT,
4562 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4563 },
4564 {
4565 "helper access to map: bounds check using <, good access",
4566 .insns = {
4567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4569 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4570 BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4573 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4574 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4575 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4576 BPF_MOV64_IMM(BPF_REG_0, 0),
4577 BPF_EXIT_INSN(),
4578 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4579 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4580 BPF_MOV64_IMM(BPF_REG_0, 0),
4581 BPF_EXIT_INSN(),
4582 },
4583 .fixup_map2 = { 3 },
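	/* The taken JLT branch gives R3 < 32, so the one-byte store at
	 * R1 + R3 stays inside the 48-byte value; the >= 32 path just exits.
	 */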
4584 .result = ACCEPT,
4585 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4586 },
4587 {
4588 "helper access to map: bounds check using <, bad access",
4589 .insns = {
4590 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4591 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4592 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4593 BPF_LD_MAP_FD(BPF_REG_1, 0),
4594 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4595 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4596 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4597 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4598 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4599 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4600 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4601 BPF_MOV64_IMM(BPF_REG_0, 0),
4602 BPF_EXIT_INSN(),
4603 BPF_MOV64_IMM(BPF_REG_0, 0),
4604 BPF_EXIT_INSN(),
4605 },
4606 .fixup_map2 = { 3 },
4607 .result = REJECT,
4608 .errstr = "R1 unbounded memory access",
4609 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4610 },
4611 {
4612 "helper access to map: bounds check using <=, good access",
4613 .insns = {
4614 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4616 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4617 BPF_LD_MAP_FD(BPF_REG_1, 0),
4618 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4619 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4620 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4621 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4622 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4623 BPF_MOV64_IMM(BPF_REG_0, 0),
4624 BPF_EXIT_INSN(),
4625 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4626 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4627 BPF_MOV64_IMM(BPF_REG_0, 0),
4628 BPF_EXIT_INSN(),
4629 },
4630 .fixup_map2 = { 3 },
4631 .result = ACCEPT,
4632 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4633 },
4634 {
4635 "helper access to map: bounds check using <=, bad access",
4636 .insns = {
4637 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4638 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4639 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4640 BPF_LD_MAP_FD(BPF_REG_1, 0),
4641 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4642 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4643 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4644 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4645 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4646 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4647 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4648 BPF_MOV64_IMM(BPF_REG_0, 0),
4649 BPF_EXIT_INSN(),
4650 BPF_MOV64_IMM(BPF_REG_0, 0),
4651 BPF_EXIT_INSN(),
4652 },
4653 .fixup_map2 = { 3 },
4654 .result = REJECT,
4655 .errstr = "R1 unbounded memory access",
4656 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4657 },
4658 {
4659 "helper access to map: bounds check using s<, good access",
4660 .insns = {
4661 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4662 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4663 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4664 BPF_LD_MAP_FD(BPF_REG_1, 0),
4665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4666 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4667 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4668 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4669 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4670 BPF_MOV64_IMM(BPF_REG_0, 0),
4671 BPF_EXIT_INSN(),
4672 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4673 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4674 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4675 BPF_MOV64_IMM(BPF_REG_0, 0),
4676 BPF_EXIT_INSN(),
4677 },
4678 .fixup_map2 = { 3 },
4679 .result = ACCEPT,
4680 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4681 },
4682 {
4683 "helper access to map: bounds check using s<, good access 2",
4684 .insns = {
4685 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4686 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4687 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4688 BPF_LD_MAP_FD(BPF_REG_1, 0),
4689 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4691 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4692 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4693 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4694 BPF_MOV64_IMM(BPF_REG_0, 0),
4695 BPF_EXIT_INSN(),
4696 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4697 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4698 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4699 BPF_MOV64_IMM(BPF_REG_0, 0),
4700 BPF_EXIT_INSN(),
4701 },
4702 .fixup_map2 = { 3 },
4703 .result = ACCEPT,
4704 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4705 },
4706 {
4707 "helper access to map: bounds check using s<, bad access",
4708 .insns = {
4709 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4710 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4711 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4712 BPF_LD_MAP_FD(BPF_REG_1, 0),
4713 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4714 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4716 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4717 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4718 BPF_MOV64_IMM(BPF_REG_0, 0),
4719 BPF_EXIT_INSN(),
4720 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4721 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4722 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4723 BPF_MOV64_IMM(BPF_REG_0, 0),
4724 BPF_EXIT_INSN(),
4725 },
4726 .fixup_map2 = { 3 },
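	/* Unlike the "good access 2" variant, the value is loaded as a 64-bit
	 * word, so a lower bound of -3 still leaves a negative minimum.
	 */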
4727 .result = REJECT,
4728 .errstr = "R1 min value is negative",
4729 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4730 },
4731 {
4732 "helper access to map: bounds check using s<=, good access",
4733 .insns = {
4734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4735 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4736 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4737 BPF_LD_MAP_FD(BPF_REG_1, 0),
4738 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4740 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4741 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4742 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4743 BPF_MOV64_IMM(BPF_REG_0, 0),
4744 BPF_EXIT_INSN(),
4745 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
4746 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4747 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4748 BPF_MOV64_IMM(BPF_REG_0, 0),
4749 BPF_EXIT_INSN(),
4750 },
4751 .fixup_map2 = { 3 },
4752 .result = ACCEPT,
4753 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4754 },
4755 {
4756 "helper access to map: bounds check using s<=, good access 2",
4757 .insns = {
4758 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4761 BPF_LD_MAP_FD(BPF_REG_1, 0),
4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4763 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4765 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4766 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4767 BPF_MOV64_IMM(BPF_REG_0, 0),
4768 BPF_EXIT_INSN(),
4769 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
4770 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4771 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4772 BPF_MOV64_IMM(BPF_REG_0, 0),
4773 BPF_EXIT_INSN(),
4774 },
4775 .fixup_map2 = { 3 },
4776 .result = ACCEPT,
4777 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4778 },
4779 {
4780 "helper access to map: bounds check using s<=, bad access",
4781 .insns = {
4782 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4784 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4785 BPF_LD_MAP_FD(BPF_REG_1, 0),
4786 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4788 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4789 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4790 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
4791 BPF_MOV64_IMM(BPF_REG_0, 0),
4792 BPF_EXIT_INSN(),
4793 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
4794 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4795 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4796 BPF_MOV64_IMM(BPF_REG_0, 0),
4797 BPF_EXIT_INSN(),
4798 },
4799 .fixup_map2 = { 3 },
4800 .result = REJECT,
4801 .errstr = "R1 min value is negative",
4802 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4803 },
4804 {
4805 "map element value is preserved across register spilling",
4806 .insns = {
4807 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4808 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4809 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4810 BPF_LD_MAP_FD(BPF_REG_1, 0),
4811 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4812 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4813 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4814 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
4816 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4817 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4818 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4819 BPF_EXIT_INSN(),
4820 },
4821 .fixup_map2 = { 3 },
4822 .errstr_unpriv = "R0 leaks addr",
4823 .result = ACCEPT,
4824 .result_unpriv = REJECT,
4825 },
4826 {
4827 "map element value or null is marked on register spilling",
4828 .insns = {
4829 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4830 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4831 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4832 BPF_LD_MAP_FD(BPF_REG_1, 0),
4833 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4834 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4835 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
4836 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
4837 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4838 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
4839 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
4840 BPF_EXIT_INSN(),
4841 },
4842 .fixup_map2 = { 3 },
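	/* The lookup result is spilled before the NULL check; once R0 has
	 * been tested, the spilled copy is expected to be usable as a
	 * non-NULL map value when filled back into R3.
	 */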
4843 .errstr_unpriv = "R0 leaks addr",
4844 .result = ACCEPT,
4845 .result_unpriv = REJECT,
4846 },
4847 {
4848 "map element value store of cleared call register",
4849 .insns = {
4850 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4851 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4852 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4853 BPF_LD_MAP_FD(BPF_REG_1, 0),
4854 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4855 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4856 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
4857 BPF_EXIT_INSN(),
4858 },
4859 .fixup_map2 = { 3 },
4860 .errstr_unpriv = "R1 !read_ok",
4861 .errstr = "R1 !read_ok",
4862 .result = REJECT,
4863 .result_unpriv = REJECT,
4864 },
4865 {
4866 "map element value with unaligned store",
4867 .insns = {
4868 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4869 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4870 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4871 BPF_LD_MAP_FD(BPF_REG_1, 0),
4872 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4873 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
4874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4875 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
4876 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
4877 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
4878 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4879 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
4880 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
4881 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
4882 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
4883 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
4884 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
4885 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
4886 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
4887 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
4888 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
4889 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
4890 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
4891 BPF_EXIT_INSN(),
4892 },
4893 .fixup_map2 = { 3 },
4894 .errstr_unpriv = "R0 leaks addr",
4895 .result = ACCEPT,
4896 .result_unpriv = REJECT,
4897 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4898 },
4899 {
4900 "map element value with unaligned load",
4901 .insns = {
4902 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4903 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4904 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4905 BPF_LD_MAP_FD(BPF_REG_1, 0),
4906 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4907 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
4908 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4909 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
4910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
4911 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4912 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
4913 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
4914 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
4915 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
4916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
4917 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
4918 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
4919 BPF_EXIT_INSN(),
4920 },
4921 .fixup_map2 = { 3 },
4922 .errstr_unpriv = "R0 leaks addr",
4923 .result = ACCEPT,
4924 .result_unpriv = REJECT,
4925 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4926 },
4927 {
4928 "map element value illegal alu op, 1",
4929 .insns = {
4930 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4931 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4932 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4933 BPF_LD_MAP_FD(BPF_REG_1, 0),
4934 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4935 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4936 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
4937 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4938 BPF_EXIT_INSN(),
4939 },
4940 .fixup_map2 = { 3 },
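	/* &= on a map value pointer turns it into an unknown scalar; the
	 * following "illegal alu op" tests do the same with a 32-bit add,
	 * /=, a byte swap and an XADD on a spilled pointer.
	 */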
4941 .errstr_unpriv = "R0 bitwise operator &= on pointer",
4942 .errstr = "invalid mem access 'inv'",
4943 .result = REJECT,
4944 .result_unpriv = REJECT,
4945 },
4946 {
4947 "map element value illegal alu op, 2",
4948 .insns = {
4949 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4950 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4951 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4952 BPF_LD_MAP_FD(BPF_REG_1, 0),
4953 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4954 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4955 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
4956 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4957 BPF_EXIT_INSN(),
4958 },
4959 .fixup_map2 = { 3 },
4960 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
4961 .errstr = "invalid mem access 'inv'",
4962 .result = REJECT,
4963 .result_unpriv = REJECT,
4964 },
4965 {
4966 "map element value illegal alu op, 3",
4967 .insns = {
4968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4970 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4971 BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4973 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4974 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
4975 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4976 BPF_EXIT_INSN(),
4977 },
4978 .fixup_map2 = { 3 },
4979 .errstr_unpriv = "R0 pointer arithmetic with /= operator",
4980 .errstr = "invalid mem access 'inv'",
4981 .result = REJECT,
4982 .result_unpriv = REJECT,
4983 },
4984 {
4985 "map element value illegal alu op, 4",
4986 .insns = {
4987 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4988 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4989 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4990 BPF_LD_MAP_FD(BPF_REG_1, 0),
4991 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4992 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
4993 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
4994 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
4995 BPF_EXIT_INSN(),
4996 },
4997 .fixup_map2 = { 3 },
4998 .errstr_unpriv = "R0 pointer arithmetic prohibited",
4999 .errstr = "invalid mem access 'inv'",
5000 .result = REJECT,
5001 .result_unpriv = REJECT,
5002 },
5003 {
5004 "map element value illegal alu op, 5",
5005 .insns = {
5006 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5008 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5009 BPF_LD_MAP_FD(BPF_REG_1, 0),
5010 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5011 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5012 BPF_MOV64_IMM(BPF_REG_3, 4096),
5013 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5015 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5016 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5017 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5018 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5019 BPF_EXIT_INSN(),
5020 },
5021 .fixup_map2 = { 3 },
5022 .errstr = "R0 invalid mem access 'inv'",
5023 .result = REJECT,
5024 },
5025 {
5026 "map element value is preserved across register spilling",
5027 .insns = {
5028 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5029 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5030 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5031 BPF_LD_MAP_FD(BPF_REG_1, 0),
5032 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5033 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5034 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5035 offsetof(struct test_val, foo)),
5036 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5037 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5038 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5039 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5040 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5041 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5042 BPF_EXIT_INSN(),
5043 },
5044 .fixup_map2 = { 3 },
5045 .errstr_unpriv = "R0 leaks addr",
5046 .result = ACCEPT,
5047 .result_unpriv = REJECT,
5048 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5049 },
5050 {
5051 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5052 .insns = {
5053 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5055 BPF_MOV64_IMM(BPF_REG_0, 0),
5056 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5057 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5058 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5059 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5060 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5061 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5062 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5063 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5064 BPF_MOV64_IMM(BPF_REG_2, 16),
5065 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5066 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5067 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5068 BPF_MOV64_IMM(BPF_REG_4, 0),
5069 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5070 BPF_MOV64_IMM(BPF_REG_3, 0),
5071 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5072 BPF_MOV64_IMM(BPF_REG_0, 0),
5073 BPF_EXIT_INSN(),
5074 },
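	/* R2 &= 64 caps the length at 64, the compare against zero skips the
	 * call whenever the length could be zero, and the 64 bytes below the
	 * frame pointer were initialized above, so the variable-size read
	 * passes.
	 */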
5075 .result = ACCEPT,
5076 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5077 },
5078 {
5079 "helper access to variable memory: stack, bitwise AND, zero included",
5080 .insns = {
5081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5083 BPF_MOV64_IMM(BPF_REG_2, 16),
5084 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5085 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5086 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5087 BPF_MOV64_IMM(BPF_REG_3, 0),
5088 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5089 BPF_EXIT_INSN(),
5090 },
5091 .errstr = "invalid stack type R1 off=-64 access_size=0",
5092 .result = REJECT,
5093 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5094 },
5095 {
5096 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5097 .insns = {
5098 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5100 BPF_MOV64_IMM(BPF_REG_2, 16),
5101 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5102 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5103 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5104 BPF_MOV64_IMM(BPF_REG_4, 0),
5105 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5106 BPF_MOV64_IMM(BPF_REG_3, 0),
5107 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5108 BPF_MOV64_IMM(BPF_REG_0, 0),
5109 BPF_EXIT_INSN(),
5110 },
5111 .errstr = "invalid stack type R1 off=-64 access_size=65",
5112 .result = REJECT,
5113 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5114 },
5115 {
5116 "helper access to variable memory: stack, JMP, correct bounds",
5117 .insns = {
5118 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5119 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5120 BPF_MOV64_IMM(BPF_REG_0, 0),
5121 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5122 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5123 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5124 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5125 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5126 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5127 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5128 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5129 BPF_MOV64_IMM(BPF_REG_2, 16),
5130 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5131 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5132 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5133 BPF_MOV64_IMM(BPF_REG_4, 0),
5134 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5135 BPF_MOV64_IMM(BPF_REG_3, 0),
5136 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5137 BPF_MOV64_IMM(BPF_REG_0, 0),
5138 BPF_EXIT_INSN(),
5139 },
5140 .result = ACCEPT,
5141 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5142 },
5143 {
5144 "helper access to variable memory: stack, JMP (signed), correct bounds",
5145 .insns = {
5146 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5148 BPF_MOV64_IMM(BPF_REG_0, 0),
5149 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5150 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5151 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5152 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5153 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5154 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5155 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5156 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5157 BPF_MOV64_IMM(BPF_REG_2, 16),
5158 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5159 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5160 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5161 BPF_MOV64_IMM(BPF_REG_4, 0),
5162 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5163 BPF_MOV64_IMM(BPF_REG_3, 0),
5164 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5165 BPF_MOV64_IMM(BPF_REG_0, 0),
5166 BPF_EXIT_INSN(),
5167 },
5168 .result = ACCEPT,
5169 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5170 },
5171 {
5172 "helper access to variable memory: stack, JMP, bounds + offset",
5173 .insns = {
5174 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5176 BPF_MOV64_IMM(BPF_REG_2, 16),
5177 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5178 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5179 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5180 BPF_MOV64_IMM(BPF_REG_4, 0),
5181 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5182 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5183 BPF_MOV64_IMM(BPF_REG_3, 0),
5184 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5185 BPF_MOV64_IMM(BPF_REG_0, 0),
5186 BPF_EXIT_INSN(),
5187 },
5188 .errstr = "invalid stack type R1 off=-64 access_size=65",
5189 .result = REJECT,
5190 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5191 },
5192 {
5193 "helper access to variable memory: stack, JMP, wrong max",
5194 .insns = {
5195 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5196 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5197 BPF_MOV64_IMM(BPF_REG_2, 16),
5198 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5199 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5200 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5201 BPF_MOV64_IMM(BPF_REG_4, 0),
5202 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5203 BPF_MOV64_IMM(BPF_REG_3, 0),
5204 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5205 BPF_MOV64_IMM(BPF_REG_0, 0),
5206 BPF_EXIT_INSN(),
5207 },
5208 .errstr = "invalid stack type R1 off=-64 access_size=65",
5209 .result = REJECT,
5210 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5211 },
5212 {
5213 "helper access to variable memory: stack, JMP, no max check",
5214 .insns = {
5215 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5217 BPF_MOV64_IMM(BPF_REG_2, 16),
5218 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5219 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5220 BPF_MOV64_IMM(BPF_REG_4, 0),
5221 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5222 BPF_MOV64_IMM(BPF_REG_3, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5224 BPF_MOV64_IMM(BPF_REG_0, 0),
5225 BPF_EXIT_INSN(),
5226 },
5227 /* because max wasn't checked, signed min is negative */
5228 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5229 .result = REJECT,
5230 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5231 },
5232 {
5233 "helper access to variable memory: stack, JMP, no min check",
5234 .insns = {
5235 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5236 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5237 BPF_MOV64_IMM(BPF_REG_2, 16),
5238 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5239 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5240 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5241 BPF_MOV64_IMM(BPF_REG_3, 0),
5242 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5243 BPF_MOV64_IMM(BPF_REG_0, 0),
5244 BPF_EXIT_INSN(),
5245 },
5246 .errstr = "invalid stack type R1 off=-64 access_size=0",
5247 .result = REJECT,
5248 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5249 },
5250 {
5251 "helper access to variable memory: stack, JMP (signed), no min check",
5252 .insns = {
5253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5255 BPF_MOV64_IMM(BPF_REG_2, 16),
5256 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5257 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5258 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5259 BPF_MOV64_IMM(BPF_REG_3, 0),
5260 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5261 BPF_MOV64_IMM(BPF_REG_0, 0),
5262 BPF_EXIT_INSN(),
5263 },
5264 .errstr = "R2 min value is negative",
5265 .result = REJECT,
5266 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5267 },
5268 {
5269 "helper access to variable memory: map, JMP, correct bounds",
5270 .insns = {
5271 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5272 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5273 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5274 BPF_LD_MAP_FD(BPF_REG_1, 0),
5275 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5276 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5277 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5278 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5279 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5280 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5281 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5282 sizeof(struct test_val), 4),
5283 BPF_MOV64_IMM(BPF_REG_4, 0),
5284 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5285 BPF_MOV64_IMM(BPF_REG_3, 0),
5286 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5287 BPF_MOV64_IMM(BPF_REG_0, 0),
5288 BPF_EXIT_INSN(),
5289 },
5290 .fixup_map2 = { 3 },
5291 .result = ACCEPT,
5292 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5293 },
5294 {
5295 "helper access to variable memory: map, JMP, wrong max",
5296 .insns = {
5297 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5298 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5299 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5300 BPF_LD_MAP_FD(BPF_REG_1, 0),
5301 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5302 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5303 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5304 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5305 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5306 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5307 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5308 sizeof(struct test_val) + 1, 4),
5309 BPF_MOV64_IMM(BPF_REG_4, 0),
5310 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5311 BPF_MOV64_IMM(BPF_REG_3, 0),
5312 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5313 BPF_MOV64_IMM(BPF_REG_0, 0),
5314 BPF_EXIT_INSN(),
5315 },
5316 .fixup_map2 = { 3 },
5317 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5318 .result = REJECT,
5319 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5320 },
5321 {
5322 "helper access to variable memory: map adjusted, JMP, correct bounds",
5323 .insns = {
5324 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5325 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5326 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5327 BPF_LD_MAP_FD(BPF_REG_1, 0),
5328 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5329 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5330 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5331 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5332 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5333 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5334 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5335 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5336 sizeof(struct test_val) - 20, 4),
5337 BPF_MOV64_IMM(BPF_REG_4, 0),
5338 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5339 BPF_MOV64_IMM(BPF_REG_3, 0),
5340 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5341 BPF_MOV64_IMM(BPF_REG_0, 0),
5342 BPF_EXIT_INSN(),
5343 },
5344 .fixup_map2 = { 3 },
5345 .result = ACCEPT,
5346 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5347 },
5348 {
5349 "helper access to variable memory: map adjusted, JMP, wrong max",
5350 .insns = {
5351 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5352 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5353 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5354 BPF_LD_MAP_FD(BPF_REG_1, 0),
5355 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5356 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5359 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5360 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5361 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5362 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5363 sizeof(struct test_val) - 19, 4),
5364 BPF_MOV64_IMM(BPF_REG_4, 0),
5365 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5366 BPF_MOV64_IMM(BPF_REG_3, 0),
5367 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5368 BPF_MOV64_IMM(BPF_REG_0, 0),
5369 BPF_EXIT_INSN(),
5370 },
5371 .fixup_map2 = { 3 },
5372 .errstr = "R1 min value is outside of the array range",
5373 .result = REJECT,
5374 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5375 },
5376 {
5377 "helper access to variable memory: size = 0 allowed on NULL",
5378 .insns = {
5379 BPF_MOV64_IMM(BPF_REG_1, 0),
5380 BPF_MOV64_IMM(BPF_REG_2, 0),
5381 BPF_MOV64_IMM(BPF_REG_3, 0),
5382 BPF_MOV64_IMM(BPF_REG_4, 0),
5383 BPF_MOV64_IMM(BPF_REG_5, 0),
5384 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5385 BPF_EXIT_INSN(),
5386 },
5387 .result = ACCEPT,
5388 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5389 },
5390 {
5391 "helper access to variable memory: size > 0 not allowed on NULL",
5392 .insns = {
5393 BPF_MOV64_IMM(BPF_REG_1, 0),
5394 BPF_MOV64_IMM(BPF_REG_2, 0),
5395 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5396 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5397 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5398 BPF_MOV64_IMM(BPF_REG_3, 0),
5399 BPF_MOV64_IMM(BPF_REG_4, 0),
5400 BPF_MOV64_IMM(BPF_REG_5, 0),
5401 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5402 BPF_EXIT_INSN(),
5403 },
5404 .errstr = "R1 type=inv expected=fp",
5405 .result = REJECT,
5406 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5407 },
5408 {
5409 "helper access to variable memory: size = 0 not allowed on != NULL",
5410 .insns = {
5411 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5412 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5413 BPF_MOV64_IMM(BPF_REG_2, 0),
5414 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5415 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5416 BPF_MOV64_IMM(BPF_REG_3, 0),
5417 BPF_MOV64_IMM(BPF_REG_4, 0),
5418 BPF_MOV64_IMM(BPF_REG_5, 0),
5419 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5420 BPF_EXIT_INSN(),
5421 },
5422 .errstr = "invalid stack type R1 off=-8 access_size=0",
5423 .result = REJECT,
5424 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5425 },
5426 {
5427 "helper access to variable memory: 8 bytes leak",
5428 .insns = {
5429 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5430 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5431 BPF_MOV64_IMM(BPF_REG_0, 0),
5432 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5433 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5435 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5436 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5437 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5438 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5439 BPF_MOV64_IMM(BPF_REG_2, 0),
5440 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5441 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5442 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5443 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5444 BPF_MOV64_IMM(BPF_REG_3, 0),
5445 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5446 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5447 BPF_EXIT_INSN(),
5448 },
5449 .errstr = "invalid indirect read from stack off -64+32 size 64",
5450 .result = REJECT,
5451 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5452 },
5453 {
5454 "helper access to variable memory: 8 bytes no leak (init memory)",
5455 .insns = {
5456 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5457 BPF_MOV64_IMM(BPF_REG_0, 0),
5458 BPF_MOV64_IMM(BPF_REG_0, 0),
5459 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5460 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5461 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5462 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5463 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5464 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5465 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5466 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5467 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5468 BPF_MOV64_IMM(BPF_REG_2, 0),
5469 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5470 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5471 BPF_MOV64_IMM(BPF_REG_3, 0),
5472 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5473 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5474 BPF_EXIT_INSN(),
5475 },
5476 .result = ACCEPT,
5477 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5478 },
5479 {
5480 "invalid and of negative number",
5481 .insns = {
5482 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5483 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5485 BPF_LD_MAP_FD(BPF_REG_1, 0),
5486 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5487 BPF_FUNC_map_lookup_elem),
5488 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5489 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5490 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5491 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5492 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5493 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5494 offsetof(struct test_val, foo)),
5495 BPF_EXIT_INSN(),
5496 },
5497 .fixup_map2 = { 3 },
5498 .errstr = "R0 max value is outside of the array range",
5499 .result = REJECT,
5500 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5501 },
5502 {
5503 "invalid range check",
5504 .insns = {
5505 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5506 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5507 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5508 BPF_LD_MAP_FD(BPF_REG_1, 0),
5509 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5510 BPF_FUNC_map_lookup_elem),
5511 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5512 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5513 BPF_MOV64_IMM(BPF_REG_9, 1),
5514 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5515 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5516 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5517 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5518 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5519 BPF_MOV32_IMM(BPF_REG_3, 1),
5520 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5521 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5522 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5523 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5524 BPF_MOV64_IMM(BPF_REG_0, 0),
5525 BPF_EXIT_INSN(),
5526 },
5527 .fixup_map2 = { 3 },
5528 .errstr = "R0 max value is outside of the array range",
5529 .result = REJECT,
5530 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5531 },
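/* The next three tests exercise map-in-map (BPF_MAP_TYPE_ARRAY_OF_MAPS)
 * lookups: the inner map pointer returned by the first lookup must be
 * NULL-checked and passed to the second lookup unmodified.
 */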
5532 {
5533 "map in map access",
5534 .insns = {
5535 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5536 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5538 BPF_LD_MAP_FD(BPF_REG_1, 0),
5539 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5540 BPF_FUNC_map_lookup_elem),
5541 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5542 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5543 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5544 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5545 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5546 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5547 BPF_FUNC_map_lookup_elem),
5548 BPF_MOV64_IMM(BPF_REG_0, 0),
5549 BPF_EXIT_INSN(),
5550 },
5551 .fixup_map_in_map = { 3 },
5552 .result = ACCEPT,
5553 },
5554 {
5555 "invalid inner map pointer",
5556 .insns = {
5557 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5558 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5559 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5560 BPF_LD_MAP_FD(BPF_REG_1, 0),
5561 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5562 BPF_FUNC_map_lookup_elem),
5563 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5564 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5565 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5569 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5570 BPF_FUNC_map_lookup_elem),
5571 BPF_MOV64_IMM(BPF_REG_0, 0),
5572 BPF_EXIT_INSN(),
5573 },
5574 .fixup_map_in_map = { 3 },
5575 .errstr = "R1 type=inv expected=map_ptr",
5576 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5577 .result = REJECT,
5578 },
5579 {
5580 "forgot null checking on the inner map pointer",
5581 .insns = {
5582 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5583 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5585 BPF_LD_MAP_FD(BPF_REG_1, 0),
5586 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5587 BPF_FUNC_map_lookup_elem),
5588 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5589 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5591 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5592 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5593 BPF_FUNC_map_lookup_elem),
5594 BPF_MOV64_IMM(BPF_REG_0, 0),
5595 BPF_EXIT_INSN(),
5596 },
5597 .fixup_map_in_map = { 3 },
5598 .errstr = "R1 type=map_value_or_null expected=map_ptr",
5599 .result = REJECT,
5600 },
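/* BPF_LD_ABS/BPF_LD_IND implicitly use the skb context saved in R6 and
 * clobber the caller-saved registers R1-R5, so reading any of them
 * afterwards must be rejected; callee-saved registers such as R7 stay
 * readable.
 */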
5601 {
5602 "ld_abs: check calling conv, r1",
5603 .insns = {
5604 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5605 BPF_MOV64_IMM(BPF_REG_1, 0),
5606 BPF_LD_ABS(BPF_W, -0x200000),
5607 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5608 BPF_EXIT_INSN(),
5609 },
5610 .errstr = "R1 !read_ok",
5611 .result = REJECT,
5612 },
5613 {
5614 "ld_abs: check calling conv, r2",
5615 .insns = {
5616 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5617 BPF_MOV64_IMM(BPF_REG_2, 0),
5618 BPF_LD_ABS(BPF_W, -0x200000),
5619 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5620 BPF_EXIT_INSN(),
5621 },
5622 .errstr = "R2 !read_ok",
5623 .result = REJECT,
5624 },
5625 {
5626 "ld_abs: check calling conv, r3",
5627 .insns = {
5628 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5629 BPF_MOV64_IMM(BPF_REG_3, 0),
5630 BPF_LD_ABS(BPF_W, -0x200000),
5631 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5632 BPF_EXIT_INSN(),
5633 },
5634 .errstr = "R3 !read_ok",
5635 .result = REJECT,
5636 },
5637 {
5638 "ld_abs: check calling conv, r4",
5639 .insns = {
5640 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5641 BPF_MOV64_IMM(BPF_REG_4, 0),
5642 BPF_LD_ABS(BPF_W, -0x200000),
5643 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5644 BPF_EXIT_INSN(),
5645 },
5646 .errstr = "R4 !read_ok",
5647 .result = REJECT,
5648 },
5649 {
5650 "ld_abs: check calling conv, r5",
5651 .insns = {
5652 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5653 BPF_MOV64_IMM(BPF_REG_5, 0),
5654 BPF_LD_ABS(BPF_W, -0x200000),
5655 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5656 BPF_EXIT_INSN(),
5657 },
5658 .errstr = "R5 !read_ok",
5659 .result = REJECT,
5660 },
5661 {
5662 "ld_abs: check calling conv, r7",
5663 .insns = {
5664 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5665 BPF_MOV64_IMM(BPF_REG_7, 0),
5666 BPF_LD_ABS(BPF_W, -0x200000),
5667 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5668 BPF_EXIT_INSN(),
5669 },
5670 .result = ACCEPT,
5671 },
5672 {
5673 "ld_ind: check calling conv, r1",
5674 .insns = {
5675 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5676 BPF_MOV64_IMM(BPF_REG_1, 1),
5677 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5678 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5679 BPF_EXIT_INSN(),
5680 },
5681 .errstr = "R1 !read_ok",
5682 .result = REJECT,
5683 },
5684 {
5685 "ld_ind: check calling conv, r2",
5686 .insns = {
5687 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5688 BPF_MOV64_IMM(BPF_REG_2, 1),
5689 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5690 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5691 BPF_EXIT_INSN(),
5692 },
5693 .errstr = "R2 !read_ok",
5694 .result = REJECT,
5695 },
5696 {
5697 "ld_ind: check calling conv, r3",
5698 .insns = {
5699 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5700 BPF_MOV64_IMM(BPF_REG_3, 1),
5701 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5702 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5703 BPF_EXIT_INSN(),
5704 },
5705 .errstr = "R3 !read_ok",
5706 .result = REJECT,
5707 },
5708 {
5709 "ld_ind: check calling conv, r4",
5710 .insns = {
5711 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5712 BPF_MOV64_IMM(BPF_REG_4, 1),
5713 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5714 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5715 BPF_EXIT_INSN(),
5716 },
5717 .errstr = "R4 !read_ok",
5718 .result = REJECT,
5719 },
5720 {
5721 "ld_ind: check calling conv, r5",
5722 .insns = {
5723 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5724 BPF_MOV64_IMM(BPF_REG_5, 1),
5725 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5726 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5727 BPF_EXIT_INSN(),
5728 },
5729 .errstr = "R5 !read_ok",
5730 .result = REJECT,
5731 },
5732 {
5733 "ld_ind: check calling conv, r7",
5734 .insns = {
5735 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5736 BPF_MOV64_IMM(BPF_REG_7, 1),
5737 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5738 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5739 BPF_EXIT_INSN(),
5740 },
5741 .result = ACCEPT,
5742 },
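/* bpf_perf_event_data->sample_period may be loaded at any width; the
 * byte-order conditionals below pick the offset of the low-order bytes so
 * that the narrow loads still land inside the same 64-bit field on big
 * endian.
 */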
5743 {
5744 "check bpf_perf_event_data->sample_period byte load permitted",
5745 .insns = {
5746 BPF_MOV64_IMM(BPF_REG_0, 0),
5747 #if __BYTE_ORDER == __LITTLE_ENDIAN
5748 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5749 offsetof(struct bpf_perf_event_data, sample_period)),
5750 #else
5751 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
5752 offsetof(struct bpf_perf_event_data, sample_period) + 7),
5753 #endif
5754 BPF_EXIT_INSN(),
5755 },
5756 .result = ACCEPT,
5757 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5758 },
5759 {
5760 "check bpf_perf_event_data->sample_period half load permitted",
5761 .insns = {
5762 BPF_MOV64_IMM(BPF_REG_0, 0),
5763 #if __BYTE_ORDER == __LITTLE_ENDIAN
5764 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5765 offsetof(struct bpf_perf_event_data, sample_period)),
5766 #else
5767 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5768 offsetof(struct bpf_perf_event_data, sample_period) + 6),
5769 #endif
5770 BPF_EXIT_INSN(),
5771 },
5772 .result = ACCEPT,
5773 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5774 },
5775 {
5776 "check bpf_perf_event_data->sample_period word load permitted",
5777 .insns = {
5778 BPF_MOV64_IMM(BPF_REG_0, 0),
5779 #if __BYTE_ORDER == __LITTLE_ENDIAN
5780 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5781 offsetof(struct bpf_perf_event_data, sample_period)),
5782 #else
5783 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
5784 offsetof(struct bpf_perf_event_data, sample_period) + 4),
5785 #endif
5786 BPF_EXIT_INSN(),
5787 },
5788 .result = ACCEPT,
5789 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5790 },
5791 {
5792 "check bpf_perf_event_data->sample_period dword load permitted",
5793 .insns = {
5794 BPF_MOV64_IMM(BPF_REG_0, 0),
5795 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
5796 offsetof(struct bpf_perf_event_data, sample_period)),
5797 BPF_EXIT_INSN(),
5798 },
5799 .result = ACCEPT,
5800 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
5801 },
5802 {
5803 "check skb->data half load not permitted",
5804 .insns = {
5805 BPF_MOV64_IMM(BPF_REG_0, 0),
5806 #if __BYTE_ORDER == __LITTLE_ENDIAN
5807 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5808 offsetof(struct __sk_buff, data)),
5809 #else
5810 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5811 offsetof(struct __sk_buff, data) + 2),
5812 #endif
5813 BPF_EXIT_INSN(),
5814 },
5815 .result = REJECT,
5816 .errstr = "invalid bpf_context access",
5817 },
5818 {
5819 "check skb->tc_classid half load not permitted for lwt prog",
5820 .insns = {
5821 BPF_MOV64_IMM(BPF_REG_0, 0),
5822 #if __BYTE_ORDER == __LITTLE_ENDIAN
5823 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5824 offsetof(struct __sk_buff, tc_classid)),
5825 #else
5826 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
5827 offsetof(struct __sk_buff, tc_classid) + 2),
5828 #endif
5829 BPF_EXIT_INSN(),
5830 },
5831 .result = REJECT,
5832 .errstr = "invalid bpf_context access",
5833 .prog_type = BPF_PROG_TYPE_LWT_IN,
5834 },
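/* The "bounds checks mixing signed and unsigned" tests load an unknown
 * value from the stack and bound it with a mix of unsigned (JGT/JGE) and
 * signed (JSGT/JSGE) comparisons before using it as a map value offset.
 * Unless both a non-negative lower bound and an upper bound are proven,
 * the verifier must reject the access.
 */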
5835 {
5836 "bounds checks mixing signed and unsigned, positive bounds",
5837 .insns = {
5838 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5839 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5841 BPF_LD_MAP_FD(BPF_REG_1, 0),
5842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5843 BPF_FUNC_map_lookup_elem),
5844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5845 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5846 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5847 BPF_MOV64_IMM(BPF_REG_2, 2),
5848 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
5849 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
5850 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5851 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
5852 BPF_MOV64_IMM(BPF_REG_0, 0),
5853 BPF_EXIT_INSN(),
5854 },
5855 .fixup_map1 = { 3 },
5856 .errstr = "R0 min value is negative",
5857 .result = REJECT,
5858 },
5859 {
5860 "bounds checks mixing signed and unsigned",
5861 .insns = {
5862 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5863 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5864 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5865 BPF_LD_MAP_FD(BPF_REG_1, 0),
5866 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5867 BPF_FUNC_map_lookup_elem),
5868 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5869 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5870 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5871 BPF_MOV64_IMM(BPF_REG_2, -1),
5872 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
5873 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
5874 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5875 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
5876 BPF_MOV64_IMM(BPF_REG_0, 0),
5877 BPF_EXIT_INSN(),
5878 },
5879 .fixup_map1 = { 3 },
5880 .errstr = "R0 min value is negative",
5881 .result = REJECT,
5882 },
5883 {
5884 "bounds checks mixing signed and unsigned, variant 2",
5885 .insns = {
5886 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5887 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5888 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5889 BPF_LD_MAP_FD(BPF_REG_1, 0),
5890 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5891 BPF_FUNC_map_lookup_elem),
5892 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5893 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5894 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5895 BPF_MOV64_IMM(BPF_REG_2, -1),
5896 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
5897 BPF_MOV64_IMM(BPF_REG_8, 0),
5898 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
5899 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
5900 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
5901 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
5902 BPF_MOV64_IMM(BPF_REG_0, 0),
5903 BPF_EXIT_INSN(),
5904 },
5905 .fixup_map1 = { 3 },
5906 .errstr = "R8 invalid mem access 'inv'",
5907 .result = REJECT,
5908 },
5909 {
5910 "bounds checks mixing signed and unsigned, variant 3",
5911 .insns = {
5912 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5913 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5914 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5915 BPF_LD_MAP_FD(BPF_REG_1, 0),
5916 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5917 BPF_FUNC_map_lookup_elem),
5918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
5919 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5920 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5921 BPF_MOV64_IMM(BPF_REG_2, -1),
5922 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
5923 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
5924 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
5925 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
5926 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
5927 BPF_MOV64_IMM(BPF_REG_0, 0),
5928 BPF_EXIT_INSN(),
5929 },
5930 .fixup_map1 = { 3 },
5931 .errstr = "R8 invalid mem access 'inv'",
5932 .result = REJECT,
5933 },
5934 {
5935 "bounds checks mixing signed and unsigned, variant 4",
5936 .insns = {
5937 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5940 BPF_LD_MAP_FD(BPF_REG_1, 0),
5941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5942 BPF_FUNC_map_lookup_elem),
5943 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5944 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5945 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5946 BPF_MOV64_IMM(BPF_REG_2, 1),
5947 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
5948 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
5949 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5950 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
5951 BPF_MOV64_IMM(BPF_REG_0, 0),
5952 BPF_EXIT_INSN(),
5953 },
5954 .fixup_map1 = { 3 },
5955 .result = ACCEPT,
5956 },
5957 {
5958 "bounds checks mixing signed and unsigned, variant 5",
5959 .insns = {
5960 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5961 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5962 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5963 BPF_LD_MAP_FD(BPF_REG_1, 0),
5964 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5965 BPF_FUNC_map_lookup_elem),
5966 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
5967 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5968 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5969 BPF_MOV64_IMM(BPF_REG_2, -1),
5970 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
5971 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
5972 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
5973 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
5974 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
5975 BPF_MOV64_IMM(BPF_REG_0, 0),
5976 BPF_EXIT_INSN(),
5977 },
5978 .fixup_map1 = { 3 },
5979 .errstr = "R0 min value is negative",
5980 .result = REJECT,
5981 },
5982 {
5983 "bounds checks mixing signed and unsigned, variant 6",
5984 .insns = {
5985 BPF_MOV64_IMM(BPF_REG_2, 0),
5986 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
5987 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
5988 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
5989 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
5990 BPF_MOV64_IMM(BPF_REG_6, -1),
5991 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
5992 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
5993 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
5994 BPF_MOV64_IMM(BPF_REG_5, 0),
5995 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
5996 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5997 BPF_FUNC_skb_load_bytes),
5998 BPF_MOV64_IMM(BPF_REG_0, 0),
5999 BPF_EXIT_INSN(),
6000 },
6001 .errstr = "R4 min value is negative, either use unsigned",
6002 .result = REJECT,
6003 },
6004 {
6005 "bounds checks mixing signed and unsigned, variant 7",
6006 .insns = {
6007 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6008 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6009 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6010 BPF_LD_MAP_FD(BPF_REG_1, 0),
6011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6012 BPF_FUNC_map_lookup_elem),
6013 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6014 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6015 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6016 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6017 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6018 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6019 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6020 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6021 BPF_MOV64_IMM(BPF_REG_0, 0),
6022 BPF_EXIT_INSN(),
6023 },
6024 .fixup_map1 = { 3 },
6025 .result = ACCEPT,
6026 },
6027 {
6028 "bounds checks mixing signed and unsigned, variant 8",
6029 .insns = {
6030 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6031 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6032 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6033 BPF_LD_MAP_FD(BPF_REG_1, 0),
6034 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6035 BPF_FUNC_map_lookup_elem),
6036 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6037 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6038 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6039 BPF_MOV64_IMM(BPF_REG_2, -1),
6040 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6041 BPF_MOV64_IMM(BPF_REG_0, 0),
6042 BPF_EXIT_INSN(),
6043 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6044 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6045 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6046 BPF_MOV64_IMM(BPF_REG_0, 0),
6047 BPF_EXIT_INSN(),
6048 },
6049 .fixup_map1 = { 3 },
6050 .errstr = "R0 min value is negative",
6051 .result = REJECT,
6052 },
6053 {
6054 "bounds checks mixing signed and unsigned, variant 9",
6055 .insns = {
6056 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6059 BPF_LD_MAP_FD(BPF_REG_1, 0),
6060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6061 BPF_FUNC_map_lookup_elem),
6062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6063 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6064 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6065 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6066 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6067 BPF_MOV64_IMM(BPF_REG_0, 0),
6068 BPF_EXIT_INSN(),
6069 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6070 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6071 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6072 BPF_MOV64_IMM(BPF_REG_0, 0),
6073 BPF_EXIT_INSN(),
6074 },
6075 .fixup_map1 = { 3 },
6076 .result = ACCEPT,
6077 },
6078 {
6079 "bounds checks mixing signed and unsigned, variant 10",
6080 .insns = {
6081 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6082 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6084 BPF_LD_MAP_FD(BPF_REG_1, 0),
6085 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6086 BPF_FUNC_map_lookup_elem),
6087 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6088 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6089 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6090 BPF_MOV64_IMM(BPF_REG_2, 0),
6091 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6092 BPF_MOV64_IMM(BPF_REG_0, 0),
6093 BPF_EXIT_INSN(),
6094 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6095 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6096 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6097 BPF_MOV64_IMM(BPF_REG_0, 0),
6098 BPF_EXIT_INSN(),
6099 },
6100 .fixup_map1 = { 3 },
6101 .errstr = "R0 min value is negative",
6102 .result = REJECT,
6103 },
6104 {
6105 "bounds checks mixing signed and unsigned, variant 11",
6106 .insns = {
6107 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6110 BPF_LD_MAP_FD(BPF_REG_1, 0),
6111 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6112 BPF_FUNC_map_lookup_elem),
6113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6114 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6115 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6116 BPF_MOV64_IMM(BPF_REG_2, -1),
6117 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6118 /* Dead branch. */
6119 BPF_MOV64_IMM(BPF_REG_0, 0),
6120 BPF_EXIT_INSN(),
6121 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6122 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6123 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6124 BPF_MOV64_IMM(BPF_REG_0, 0),
6125 BPF_EXIT_INSN(),
6126 },
6127 .fixup_map1 = { 3 },
6128 .errstr = "R0 min value is negative",
6129 .result = REJECT,
6130 },
6131 {
6132 "bounds checks mixing signed and unsigned, variant 12",
6133 .insns = {
6134 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6135 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6136 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6137 BPF_LD_MAP_FD(BPF_REG_1, 0),
6138 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6139 BPF_FUNC_map_lookup_elem),
6140 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6141 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6142 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6143 BPF_MOV64_IMM(BPF_REG_2, -6),
6144 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6145 BPF_MOV64_IMM(BPF_REG_0, 0),
6146 BPF_EXIT_INSN(),
6147 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6148 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6149 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6150 BPF_MOV64_IMM(BPF_REG_0, 0),
6151 BPF_EXIT_INSN(),
6152 },
6153 .fixup_map1 = { 3 },
6154 .errstr = "R0 min value is negative",
6155 .result = REJECT,
6156 },
6157 {
6158 "bounds checks mixing signed and unsigned, variant 13",
6159 .insns = {
6160 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6161 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6163 BPF_LD_MAP_FD(BPF_REG_1, 0),
6164 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6165 BPF_FUNC_map_lookup_elem),
6166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6167 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6168 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6169 BPF_MOV64_IMM(BPF_REG_2, 2),
6170 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6171 BPF_MOV64_IMM(BPF_REG_7, 1),
6172 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6173 BPF_MOV64_IMM(BPF_REG_0, 0),
6174 BPF_EXIT_INSN(),
6175 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6176 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6177 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6178 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6179 BPF_MOV64_IMM(BPF_REG_0, 0),
6180 BPF_EXIT_INSN(),
6181 },
6182 .fixup_map1 = { 3 },
6183 .errstr = "R0 min value is negative",
6184 .result = REJECT,
6185 },
6186 {
6187 "bounds checks mixing signed and unsigned, variant 14",
6188 .insns = {
6189 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6190 offsetof(struct __sk_buff, mark)),
6191 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6192 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6193 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6194 BPF_LD_MAP_FD(BPF_REG_1, 0),
6195 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6196 BPF_FUNC_map_lookup_elem),
6197 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6198 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6199 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6200 BPF_MOV64_IMM(BPF_REG_2, -1),
6201 BPF_MOV64_IMM(BPF_REG_8, 2),
6202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6203 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6204 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6205 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6206 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6207 BPF_MOV64_IMM(BPF_REG_0, 0),
6208 BPF_EXIT_INSN(),
6209 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6210 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6211 },
6212 .fixup_map1 = { 4 },
6213 .errstr = "R0 min value is negative",
6214 .result = REJECT,
6215 },
6216 {
6217 "bounds checks mixing signed and unsigned, variant 15",
6218 .insns = {
6219 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6222 BPF_LD_MAP_FD(BPF_REG_1, 0),
6223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6224 BPF_FUNC_map_lookup_elem),
6225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6226 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6227 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6228 BPF_MOV64_IMM(BPF_REG_2, -6),
6229 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6230 BPF_MOV64_IMM(BPF_REG_0, 0),
6231 BPF_EXIT_INSN(),
6232 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6233 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6234 BPF_MOV64_IMM(BPF_REG_0, 0),
6235 BPF_EXIT_INSN(),
6236 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6237 BPF_MOV64_IMM(BPF_REG_0, 0),
6238 BPF_EXIT_INSN(),
6239 },
6240 .fixup_map1 = { 3 },
6241 .errstr_unpriv = "R0 pointer comparison prohibited",
6242 .errstr = "R0 min value is negative",
6243 .result = REJECT,
6244 .result_unpriv = REJECT,
6245 },
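/* Subtracting one bounded byte from another yields a result that can be
 * negative or, after the shift in variant 1, larger than the map value
 * size, so the pointer arithmetic that follows must be rejected.
 */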
6246 {
6247 "subtraction bounds (map value) variant 1",
6248 .insns = {
6249 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6250 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6251 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6252 BPF_LD_MAP_FD(BPF_REG_1, 0),
6253 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6254 BPF_FUNC_map_lookup_elem),
6255 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6256 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6257 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6258 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6259 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6260 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6261 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6262 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6263 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6264 BPF_EXIT_INSN(),
6265 BPF_MOV64_IMM(BPF_REG_0, 0),
6266 BPF_EXIT_INSN(),
6267 },
6268 .fixup_map1 = { 3 },
6269 .errstr = "R0 max value is outside of the array range",
6270 .result = REJECT,
6271 },
6272 {
6273 "subtraction bounds (map value) variant 2",
6274 .insns = {
6275 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6276 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6278 BPF_LD_MAP_FD(BPF_REG_1, 0),
6279 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6280 BPF_FUNC_map_lookup_elem),
6281 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6282 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6283 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6284 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6285 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6286 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6287 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6288 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6289 BPF_EXIT_INSN(),
6290 BPF_MOV64_IMM(BPF_REG_0, 0),
6291 BPF_EXIT_INSN(),
6292 },
6293 .fixup_map1 = { 3 },
6294 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6295 .result = REJECT,
6296 },
6297 {
6298 "variable-offset ctx access",
6299 .insns = {
6300 /* Get an unknown value */
6301 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6302 /* Make it small and 4-byte aligned */
6303 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6304 /* add it to skb. We now have either &skb->len or
6305 * &skb->pkt_type, but we don't know which
6306 */
6307 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6308 /* dereference it */
6309 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
6310 BPF_EXIT_INSN(),
6311 },
6312 .errstr = "variable ctx access var_off=(0x0; 0x4)",
6313 .result = REJECT,
6314 .prog_type = BPF_PROG_TYPE_LWT_IN,
6315 },
6316 {
6317 "variable-offset stack access",
6318 .insns = {
6319 /* Fill the top 8 bytes of the stack */
6320 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6321 /* Get an unknown value */
6322 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6323 /* Make it small and 4-byte aligned */
6324 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
6325 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
6326 /* add it to fp. We now have either fp-4 or fp-8, but
6327 * we don't know which
6328 */
6329 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
6330 /* dereference it */
6331 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
6332 BPF_EXIT_INSN(),
6333 },
6334 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6335 .result = REJECT,
6336 .prog_type = BPF_PROG_TYPE_LWT_IN,
6337 },
6338 };
6339
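/* Determine the effective program length by scanning backwards through the
 * fixed-size insns[] array for the last non-zero instruction.
 */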
6340 static int probe_filter_length(const struct bpf_insn *fp)
6341 {
6342 int len;
6343
6344 for (len = MAX_INSNS - 1; len > 0; --len)
6345 if (fp[len].code != 0 || fp[len].imm != 0)
6346 break;
6347 return len + 1;
6348 }
6349
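/* Create a hash map with 8-byte keys and the given value size; used to back
 * the fixup_map1 (u64 value) and fixup_map2 (struct test_val) fixups.
 */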
6350 static int create_map(uint32_t size_value, uint32_t max_elem)
6351 {
6352 int fd;
6353
6354 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
6355 size_value, max_elem, BPF_F_NO_PREALLOC);
6356 if (fd < 0)
6357 printf("Failed to create hash map '%s'!\n", strerror(errno));
6358
6359 return fd;
6360 }
6361
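/* Create a small program array for tests that patch in a tail-call map via
 * fixup_prog.
 */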
6362 static int create_prog_array(void)
6363 {
6364 int fd;
6365
6366 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
6367 sizeof(int), 4, 0);
6368 if (fd < 0)
6369 printf("Failed to create prog array '%s'!\n", strerror(errno));
6370
6371 return fd;
6372 }
6373
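/* Create an array-of-maps whose element type is described by a temporary
 * inner array map; the inner fd is only needed at creation time.
 */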
6374 static int create_map_in_map(void)
6375 {
6376 int inner_map_fd, outer_map_fd;
6377
6378 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
6379 sizeof(int), 1, 0);
6380 if (inner_map_fd < 0) {
6381 printf("Failed to create array '%s'!\n", strerror(errno));
6382 return inner_map_fd;
6383 }
6384
6385 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
6386 sizeof(int), inner_map_fd, 1, 0);
6387 if (outer_map_fd < 0)
6388 printf("Failed to create array of maps '%s'!\n",
6389 strerror(errno));
6390
6391 close(inner_map_fd);
6392
6393 return outer_map_fd;
6394 }
6395
6396 static char bpf_vlog[32768];
6397
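/* Patch the fd of each freshly created map into the imm field of the
 * BPF_LD_MAP_FD placeholder instructions listed in the test's fixup arrays.
 * The arrays are zero-terminated, which works because instruction 0 is
 * never a map load.
 */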
6398 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
6399 int *map_fds)
6400 {
6401 int *fixup_map1 = test->fixup_map1;
6402 int *fixup_map2 = test->fixup_map2;
6403 int *fixup_prog = test->fixup_prog;
6404 int *fixup_map_in_map = test->fixup_map_in_map;
6405
6406 /* Allocating hash tables with a single element is fine here, since we
6407 * only exercise the verifier and never do a runtime lookup, so the
6408 * only thing that really matters is the value size.
6409 */
6410 if (*fixup_map1) {
6411 map_fds[0] = create_map(sizeof(long long), 1);
6412 do {
6413 prog[*fixup_map1].imm = map_fds[0];
6414 fixup_map1++;
6415 } while (*fixup_map1);
6416 }
6417
6418 if (*fixup_map2) {
6419 map_fds[1] = create_map(sizeof(struct test_val), 1);
6420 do {
6421 prog[*fixup_map2].imm = map_fds[1];
6422 fixup_map2++;
6423 } while (*fixup_map2);
6424 }
6425
6426 if (*fixup_prog) {
6427 map_fds[2] = create_prog_array();
6428 do {
6429 prog[*fixup_prog].imm = map_fds[2];
6430 fixup_prog++;
6431 } while (*fixup_prog);
6432 }
6433
6434 if (*fixup_map_in_map) {
6435 map_fds[3] = create_map_in_map();
6436 do {
6437 prog[*fixup_map_in_map].imm = map_fds[3];
6438 fixup_map_in_map++;
6439 } while (*fixup_map_in_map);
6440 }
6441 }
6442
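/* Load a single test program and compare the outcome and verifier log
 * against the expected result and error string (using the unprivileged
 * variants when applicable). On architectures without efficient unaligned
 * access, F_NEEDS_EFFICIENT_UNALIGNED_ACCESS tests may instead be rejected
 * with "Unknown alignment." and still count as passing.
 */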
6443 static void do_test_single(struct bpf_test *test, bool unpriv,
6444 int *passes, int *errors)
6445 {
6446 int fd_prog, expected_ret, reject_from_alignment;
6447 struct bpf_insn *prog = test->insns;
6448 int prog_len = probe_filter_length(prog);
6449 int prog_type = test->prog_type;
6450 int map_fds[MAX_NR_MAPS];
6451 const char *expected_err;
6452 int i;
6453
6454 for (i = 0; i < MAX_NR_MAPS; i++)
6455 map_fds[i] = -1;
6456
6457 do_test_fixup(test, prog, map_fds);
6458
6459 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
6460 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
6461 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
6462
6463 expected_ret = unpriv && test->result_unpriv != UNDEF ?
6464 test->result_unpriv : test->result;
6465 expected_err = unpriv && test->errstr_unpriv ?
6466 test->errstr_unpriv : test->errstr;
6467
6468 reject_from_alignment = fd_prog < 0 &&
6469 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
6470 strstr(bpf_vlog, "Unknown alignment.");
6471 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
6472 if (reject_from_alignment) {
6473 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
6474 strerror(errno));
6475 goto fail_log;
6476 }
6477 #endif
6478 if (expected_ret == ACCEPT) {
6479 if (fd_prog < 0 && !reject_from_alignment) {
6480 printf("FAIL\nFailed to load prog '%s'!\n",
6481 strerror(errno));
6482 goto fail_log;
6483 }
6484 } else {
6485 if (fd_prog >= 0) {
6486 printf("FAIL\nUnexpectedly succeeded to load!\n");
6487 goto fail_log;
6488 }
6489 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
6490 printf("FAIL\nUnexpected error message!\n");
6491 goto fail_log;
6492 }
6493 }
6494
6495 (*passes)++;
6496 printf("OK%s\n", reject_from_alignment ?
6497 " (NOTE: reject due to unknown alignment)" : "");
6498 close_fds:
6499 close(fd_prog);
6500 for (i = 0; i < MAX_NR_MAPS; i++)
6501 close(map_fds[i]);
6502 sched_yield();
6503 return;
6504 fail_log:
6505 (*errors)++;
6506 printf("%s", bpf_vlog);
6507 goto close_fds;
6508 }
6509
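/* Return true when CAP_SYS_ADMIN is present in the effective capability set
 * of the current process.
 */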
6510 static bool is_admin(void)
6511 {
6512 cap_t caps;
6513 cap_flag_value_t sysadmin = CAP_CLEAR;
6514 const cap_value_t cap_val = CAP_SYS_ADMIN;
6515
6516 #ifdef CAP_IS_SUPPORTED
6517 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
6518 perror("CAP_IS_SUPPORTED");
6519 return false;
6520 }
6521 #endif
6522 caps = cap_get_proc();
6523 if (!caps) {
6524 perror("cap_get_proc");
6525 return false;
6526 }
6527 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
6528 perror("cap_get_flag");
6529 if (cap_free(caps))
6530 perror("cap_free");
6531 return (sysadmin == CAP_SET);
6532 }
6533
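/* Set or clear CAP_SYS_ADMIN in the effective capability set so that a root
 * run can also exercise each test as if it were unprivileged.
 */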
6534 static int set_admin(bool admin)
6535 {
6536 cap_t caps;
6537 const cap_value_t cap_val = CAP_SYS_ADMIN;
6538 int ret = -1;
6539
6540 caps = cap_get_proc();
6541 if (!caps) {
6542 perror("cap_get_proc");
6543 return -1;
6544 }
6545 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
6546 admin ? CAP_SET : CAP_CLEAR)) {
6547 perror("cap_set_flag");
6548 goto out;
6549 }
6550 if (cap_set_proc(caps)) {
6551 perror("cap_set_proc");
6552 goto out;
6553 }
6554 ret = 0;
6555 out:
6556 if (cap_free(caps))
6557 perror("cap_free");
6558 return ret;
6559 }
6560
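/* Run tests[from..to): tests using the default (socket filter) program type
 * get an extra unprivileged pass, and the privileged pass is run only when
 * the caller is root.
 */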
6561 static int do_test(bool unpriv, unsigned int from, unsigned int to)
6562 {
6563 int i, passes = 0, errors = 0;
6564
6565 for (i = from; i < to; i++) {
6566 struct bpf_test *test = &tests[i];
6567
6568 /* Skip the unprivileged pass right away for program types that
6569 * non-root users are not allowed to load.
6570 */
6571 if (!test->prog_type) {
6572 if (!unpriv)
6573 set_admin(false);
6574 printf("#%d/u %s ", i, test->descr);
6575 do_test_single(test, true, &passes, &errors);
6576 if (!unpriv)
6577 set_admin(true);
6578 }
6579
6580 if (!unpriv) {
6581 printf("#%d/p %s ", i, test->descr);
6582 do_test_single(test, false, &passes, &errors);
6583 }
6584 }
6585
6586 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
6587 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
6588 }
6589
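/* Usage: test_verifier [from [to]] - two arguments run the inclusive range
 * of test indices, one argument runs a single test, no arguments run
 * everything. The memlock rlimit is capped for unprivileged runs and lifted
 * for root.
 */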
6590 int main(int argc, char **argv)
6591 {
6592 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
6593 struct rlimit rlim = { 1 << 20, 1 << 20 };
6594 unsigned int from = 0, to = ARRAY_SIZE(tests);
6595 bool unpriv = !is_admin();
6596
6597 if (argc == 3) {
6598 unsigned int l = atoi(argv[argc - 2]);
6599 unsigned int u = atoi(argv[argc - 1]);
6600
6601 if (l < to && u < to) {
6602 from = l;
6603 to = u + 1;
6604 }
6605 } else if (argc == 2) {
6606 unsigned int t = atoi(argv[argc - 1]);
6607
6608 if (t < to) {
6609 from = t;
6610 to = t + 1;
6611 }
6612 }
6613
6614 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
6615 return do_test(unpriv, from, to);
6616 }