2 * Testsuite for eBPF verifier
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
12 #include <asm/types.h>
13 #include <linux/types.h>
24 #include <sys/capability.h>
25 #include <sys/resource.h>
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
35 # include "autoconf.h"
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 #include "../../../include/linux/filter.h"
/* Element count of a true array (does not work on pointers/decayed
 * parameters).  NOTE(review): the indented "# define" suggests this sits
 * inside an #ifndef ARRAY_SIZE guard whose lines were lost in extraction —
 * confirm against the original file.  The leading "45" token is an
 * extraction artifact (original file line number fused into the text),
 * not valid C. */
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
/* Per-test flag bits consumed via the .flags initializers in the tests[]
 * table below (F_LOAD_WITH_STRICT_ALIGNMENT is set on the misaligned-access
 * tests).  NOTE(review): F_NEEDS_EFFICIENT_UNALIGNED_ACCESS is not used in
 * the visible portion of this chunk — presumably checked by the test runner
 * elsewhere in the file; verify there.  The leading "52"/"53" tokens are
 * extraction artifacts (original file line numbers fused into the text),
 * not valid C. */
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 struct bpf_insn insns
[MAX_INSNS
];
58 int fixup_map1
[MAX_FIXUPS
];
59 int fixup_map2
[MAX_FIXUPS
];
60 int fixup_prog
[MAX_FIXUPS
];
61 int fixup_map_in_map
[MAX_FIXUPS
];
63 const char *errstr_unpriv
;
68 } result
, result_unpriv
;
69 enum bpf_prog_type prog_type
;
73 /* Note we want this to be 64 bit aligned so that the end of our array is
74 * actually the end of the structure.
 * NOTE(review): the comment terminator (original line 75) was dropped by
 * the extraction that produced this chunk; restored here so the #define
 * below is not swallowed by the comment.  The numeric line prefixes
 * ("73"/"74"/"76") are extraction artifacts, not C.  The structure this
 * alignment note refers to is not visible in this chunk — confirm against
 * the original file. */
76 #define MAX_ENTRIES 11
83 static struct bpf_test tests
[] = {
87 BPF_MOV64_IMM(BPF_REG_1
, 1),
88 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 2),
89 BPF_MOV64_IMM(BPF_REG_2
, 3),
90 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_2
),
91 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
92 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 3),
93 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
104 .errstr
= "unreachable",
110 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
114 .errstr
= "unreachable",
120 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
123 .errstr
= "jump out of range",
127 "out of range jump2",
129 BPF_JMP_IMM(BPF_JA
, 0, 0, -2),
132 .errstr
= "jump out of range",
138 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0
, 0),
140 BPF_LD_IMM64(BPF_REG_0
, 0),
141 BPF_LD_IMM64(BPF_REG_0
, 1),
142 BPF_LD_IMM64(BPF_REG_0
, 1),
143 BPF_MOV64_IMM(BPF_REG_0
, 2),
146 .errstr
= "invalid BPF_LD_IMM insn",
147 .errstr_unpriv
= "R1 pointer comparison",
153 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0
, 0),
155 BPF_LD_IMM64(BPF_REG_0
, 0),
156 BPF_LD_IMM64(BPF_REG_0
, 1),
157 BPF_LD_IMM64(BPF_REG_0
, 1),
160 .errstr
= "invalid BPF_LD_IMM insn",
161 .errstr_unpriv
= "R1 pointer comparison",
167 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
168 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0
, 0),
170 BPF_LD_IMM64(BPF_REG_0
, 0),
171 BPF_LD_IMM64(BPF_REG_0
, 1),
172 BPF_LD_IMM64(BPF_REG_0
, 1),
175 .errstr
= "invalid bpf_ld_imm64 insn",
181 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
184 .errstr
= "invalid bpf_ld_imm64 insn",
190 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
192 .errstr
= "invalid bpf_ld_imm64 insn",
198 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
207 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
220 .errstr
= "uses reserved fields",
226 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
230 .errstr
= "invalid bpf_ld_imm64 insn",
236 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1
, 0, 0, 1),
240 .errstr
= "invalid bpf_ld_imm64 insn",
246 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
250 .errstr
= "invalid bpf_ld_imm64 insn",
256 BPF_MOV64_IMM(BPF_REG_1
, 0),
257 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
261 .errstr
= "not pointing to valid bpf_map",
267 BPF_MOV64_IMM(BPF_REG_1
, 0),
268 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
272 .errstr
= "invalid bpf_ld_imm64 insn",
278 BPF_ALU64_REG(BPF_MOV
, BPF_REG_0
, BPF_REG_2
),
280 .errstr
= "jump out of range",
286 BPF_JMP_IMM(BPF_JA
, 0, 0, -1),
289 .errstr
= "back-edge",
295 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
296 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
297 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
298 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
301 .errstr
= "back-edge",
307 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
308 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
309 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
310 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
313 .errstr
= "back-edge",
317 "read uninitialized register",
319 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
322 .errstr
= "R2 !read_ok",
326 "read invalid register",
328 BPF_MOV64_REG(BPF_REG_0
, -1),
331 .errstr
= "R15 is invalid",
335 "program doesn't init R0 before exit",
337 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
340 .errstr
= "R0 !read_ok",
344 "program doesn't init R0 before exit in all branches",
346 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
347 BPF_MOV64_IMM(BPF_REG_0
, 1),
348 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
351 .errstr
= "R0 !read_ok",
352 .errstr_unpriv
= "R1 pointer comparison",
356 "stack out of bounds",
358 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
361 .errstr
= "invalid stack",
365 "invalid call insn1",
367 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
370 .errstr
= "BPF_CALL uses reserved",
374 "invalid call insn2",
376 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
379 .errstr
= "BPF_CALL uses reserved",
383 "invalid function call",
385 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
388 .errstr
= "invalid func unknown#1234567",
392 "uninitialized stack1",
394 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
395 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
396 BPF_LD_MAP_FD(BPF_REG_1
, 0),
397 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
398 BPF_FUNC_map_lookup_elem
),
402 .errstr
= "invalid indirect read from stack",
406 "uninitialized stack2",
408 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
409 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
412 .errstr
= "invalid read from stack",
416 "invalid fp arithmetic",
417 /* If this gets ever changed, make sure JITs can deal with it. */
419 BPF_MOV64_IMM(BPF_REG_0
, 0),
420 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
421 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 8),
422 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
425 .errstr_unpriv
= "R1 subtraction from stack pointer",
426 .result_unpriv
= REJECT
,
427 .errstr
= "R1 invalid mem access",
431 "non-invalid fp arithmetic",
433 BPF_MOV64_IMM(BPF_REG_0
, 0),
434 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
440 "invalid argument register",
442 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid
),
444 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
445 BPF_FUNC_get_cgroup_classid
),
448 .errstr
= "R1 !read_ok",
450 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
453 "non-invalid argument register",
455 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
456 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
457 BPF_FUNC_get_cgroup_classid
),
458 BPF_ALU64_REG(BPF_MOV
, BPF_REG_1
, BPF_REG_6
),
459 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
460 BPF_FUNC_get_cgroup_classid
),
464 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
467 "check valid spill/fill",
469 /* spill R1(ctx) into stack */
470 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
471 /* fill it back into R2 */
472 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
473 /* should be able to access R0 = *(R2 + 8) */
474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
478 .errstr_unpriv
= "R0 leaks addr",
480 .result_unpriv
= REJECT
,
483 "check valid spill/fill, skb mark",
485 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
486 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
487 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
488 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
489 offsetof(struct __sk_buff
, mark
)),
493 .result_unpriv
= ACCEPT
,
496 "check corrupted spill/fill",
498 /* spill R1(ctx) into stack */
499 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
500 /* mess up with R1 pointer on stack */
501 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
502 /* fill back into R0 should fail */
503 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
506 .errstr_unpriv
= "attempt to corrupt spilled",
507 .errstr
= "corrupted spill",
511 "invalid src register in STX",
513 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
516 .errstr
= "R15 is invalid",
520 "invalid dst register in STX",
522 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
525 .errstr
= "R14 is invalid",
529 "invalid dst register in ST",
531 BPF_ST_MEM(BPF_B
, 14, -1, -1),
534 .errstr
= "R14 is invalid",
538 "invalid src register in LDX",
540 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
543 .errstr
= "R12 is invalid",
547 "invalid dst register in LDX",
549 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
552 .errstr
= "R11 is invalid",
558 BPF_RAW_INSN(0, 0, 0, 0, 0),
561 .errstr
= "invalid BPF_LD_IMM",
567 BPF_RAW_INSN(1, 0, 0, 0, 0),
570 .errstr
= "BPF_LDX uses reserved fields",
576 BPF_RAW_INSN(-1, 0, 0, 0, 0),
579 .errstr
= "invalid BPF_ALU opcode f0",
585 BPF_RAW_INSN(-1, -1, -1, -1, -1),
588 .errstr
= "invalid BPF_ALU opcode f0",
594 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
597 .errstr
= "BPF_ALU uses reserved fields",
601 "misaligned read from stack",
603 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
604 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
607 .errstr
= "misaligned stack access",
609 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
612 "invalid map_fd for function call",
614 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
615 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
616 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
617 BPF_LD_MAP_FD(BPF_REG_1
, 0),
618 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
619 BPF_FUNC_map_delete_elem
),
622 .errstr
= "fd 0 is not pointing to valid bpf_map",
626 "don't check return value before access",
628 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
629 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
630 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
631 BPF_LD_MAP_FD(BPF_REG_1
, 0),
632 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
633 BPF_FUNC_map_lookup_elem
),
634 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
638 .errstr
= "R0 invalid mem access 'map_value_or_null'",
642 "access memory with incorrect alignment",
644 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
645 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
646 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
647 BPF_LD_MAP_FD(BPF_REG_1
, 0),
648 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
649 BPF_FUNC_map_lookup_elem
),
650 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
651 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
655 .errstr
= "misaligned value access",
657 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
660 "sometimes access memory with incorrect alignment",
662 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
663 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
664 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
665 BPF_LD_MAP_FD(BPF_REG_1
, 0),
666 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
667 BPF_FUNC_map_lookup_elem
),
668 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
669 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
671 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
675 .errstr
= "R0 invalid mem access",
676 .errstr_unpriv
= "R0 leaks addr",
678 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
683 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
684 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -8),
685 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
686 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
687 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 1),
688 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 1),
689 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 1),
690 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 2),
691 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 1),
692 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 3),
693 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 1),
694 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 4),
695 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
696 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 5),
697 BPF_MOV64_IMM(BPF_REG_0
, 0),
700 .errstr_unpriv
= "R1 pointer comparison",
701 .result_unpriv
= REJECT
,
707 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
708 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
709 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
710 BPF_JMP_IMM(BPF_JA
, 0, 0, 14),
711 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 2),
712 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
713 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
714 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 2),
715 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
716 BPF_JMP_IMM(BPF_JA
, 0, 0, 8),
717 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 2),
718 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
719 BPF_JMP_IMM(BPF_JA
, 0, 0, 5),
720 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 2),
721 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
722 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
723 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
724 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
725 BPF_MOV64_IMM(BPF_REG_0
, 0),
728 .errstr_unpriv
= "R1 pointer comparison",
729 .result_unpriv
= REJECT
,
735 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
736 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
737 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
738 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
739 BPF_JMP_IMM(BPF_JA
, 0, 0, 19),
740 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 3),
741 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
742 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
743 BPF_JMP_IMM(BPF_JA
, 0, 0, 15),
744 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 3),
745 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
746 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -32),
747 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
748 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 3),
749 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
750 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -40),
751 BPF_JMP_IMM(BPF_JA
, 0, 0, 7),
752 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 3),
753 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
754 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -48),
755 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
756 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 0),
757 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
758 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -56),
759 BPF_LD_MAP_FD(BPF_REG_1
, 0),
760 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
761 BPF_FUNC_map_delete_elem
),
764 .fixup_map1
= { 24 },
765 .errstr_unpriv
= "R1 pointer comparison",
766 .result_unpriv
= REJECT
,
772 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
773 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
774 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
775 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
776 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
777 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
778 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
779 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
780 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
781 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
782 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
783 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
784 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
785 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
786 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
787 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
788 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
789 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
790 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
791 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
792 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
793 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
794 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
795 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
796 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
797 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
798 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
799 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
800 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
801 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
802 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
803 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
804 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
805 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
806 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
807 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
808 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
809 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
810 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
811 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
812 BPF_MOV64_IMM(BPF_REG_0
, 0),
815 .errstr_unpriv
= "R1 pointer comparison",
816 .result_unpriv
= REJECT
,
822 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
823 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
824 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
825 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
826 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
827 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
828 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
829 BPF_MOV64_IMM(BPF_REG_0
, 0),
830 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
831 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
832 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
833 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
834 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
835 BPF_MOV64_IMM(BPF_REG_0
, 0),
836 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
837 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
838 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
839 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
840 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
841 BPF_MOV64_IMM(BPF_REG_0
, 0),
842 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
843 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
844 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
845 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
846 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
847 BPF_MOV64_IMM(BPF_REG_0
, 0),
848 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
849 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
850 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
851 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
852 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
853 BPF_MOV64_IMM(BPF_REG_0
, 0),
856 .errstr_unpriv
= "R1 pointer comparison",
857 .result_unpriv
= REJECT
,
861 "access skb fields ok",
863 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
864 offsetof(struct __sk_buff
, len
)),
865 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
866 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
867 offsetof(struct __sk_buff
, mark
)),
868 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
869 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
870 offsetof(struct __sk_buff
, pkt_type
)),
871 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
872 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
873 offsetof(struct __sk_buff
, queue_mapping
)),
874 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
875 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
876 offsetof(struct __sk_buff
, protocol
)),
877 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
878 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
879 offsetof(struct __sk_buff
, vlan_present
)),
880 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
881 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
882 offsetof(struct __sk_buff
, vlan_tci
)),
883 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
884 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
885 offsetof(struct __sk_buff
, napi_id
)),
886 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
892 "access skb fields bad1",
894 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
897 .errstr
= "invalid bpf_context access",
901 "access skb fields bad2",
903 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
904 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
905 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
906 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
907 BPF_LD_MAP_FD(BPF_REG_1
, 0),
908 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
909 BPF_FUNC_map_lookup_elem
),
910 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
912 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
913 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
914 offsetof(struct __sk_buff
, pkt_type
)),
918 .errstr
= "different pointers",
919 .errstr_unpriv
= "R1 pointer comparison",
923 "access skb fields bad3",
925 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
926 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
927 offsetof(struct __sk_buff
, pkt_type
)),
929 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
930 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
931 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
932 BPF_LD_MAP_FD(BPF_REG_1
, 0),
933 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
934 BPF_FUNC_map_lookup_elem
),
935 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
937 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
938 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
941 .errstr
= "different pointers",
942 .errstr_unpriv
= "R1 pointer comparison",
946 "access skb fields bad4",
948 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
949 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
950 offsetof(struct __sk_buff
, len
)),
951 BPF_MOV64_IMM(BPF_REG_0
, 0),
953 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
954 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
955 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
956 BPF_LD_MAP_FD(BPF_REG_1
, 0),
957 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
958 BPF_FUNC_map_lookup_elem
),
959 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
961 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
962 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
965 .errstr
= "different pointers",
966 .errstr_unpriv
= "R1 pointer comparison",
970 "check skb->mark is not writeable by sockets",
972 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
973 offsetof(struct __sk_buff
, mark
)),
976 .errstr
= "invalid bpf_context access",
977 .errstr_unpriv
= "R1 leaks addr",
981 "check skb->tc_index is not writeable by sockets",
983 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
984 offsetof(struct __sk_buff
, tc_index
)),
987 .errstr
= "invalid bpf_context access",
988 .errstr_unpriv
= "R1 leaks addr",
992 "check cb access: byte",
994 BPF_MOV64_IMM(BPF_REG_0
, 0),
995 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
996 offsetof(struct __sk_buff
, cb
[0])),
997 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
998 offsetof(struct __sk_buff
, cb
[0]) + 1),
999 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1000 offsetof(struct __sk_buff
, cb
[0]) + 2),
1001 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1002 offsetof(struct __sk_buff
, cb
[0]) + 3),
1003 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1004 offsetof(struct __sk_buff
, cb
[1])),
1005 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1006 offsetof(struct __sk_buff
, cb
[1]) + 1),
1007 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1008 offsetof(struct __sk_buff
, cb
[1]) + 2),
1009 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1010 offsetof(struct __sk_buff
, cb
[1]) + 3),
1011 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1012 offsetof(struct __sk_buff
, cb
[2])),
1013 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1014 offsetof(struct __sk_buff
, cb
[2]) + 1),
1015 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1016 offsetof(struct __sk_buff
, cb
[2]) + 2),
1017 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1018 offsetof(struct __sk_buff
, cb
[2]) + 3),
1019 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1020 offsetof(struct __sk_buff
, cb
[3])),
1021 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1022 offsetof(struct __sk_buff
, cb
[3]) + 1),
1023 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1024 offsetof(struct __sk_buff
, cb
[3]) + 2),
1025 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1026 offsetof(struct __sk_buff
, cb
[3]) + 3),
1027 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1028 offsetof(struct __sk_buff
, cb
[4])),
1029 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1030 offsetof(struct __sk_buff
, cb
[4]) + 1),
1031 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1032 offsetof(struct __sk_buff
, cb
[4]) + 2),
1033 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1034 offsetof(struct __sk_buff
, cb
[4]) + 3),
1035 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1036 offsetof(struct __sk_buff
, cb
[0])),
1037 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1038 offsetof(struct __sk_buff
, cb
[0]) + 1),
1039 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1040 offsetof(struct __sk_buff
, cb
[0]) + 2),
1041 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1042 offsetof(struct __sk_buff
, cb
[0]) + 3),
1043 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1044 offsetof(struct __sk_buff
, cb
[1])),
1045 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1046 offsetof(struct __sk_buff
, cb
[1]) + 1),
1047 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1048 offsetof(struct __sk_buff
, cb
[1]) + 2),
1049 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1050 offsetof(struct __sk_buff
, cb
[1]) + 3),
1051 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1052 offsetof(struct __sk_buff
, cb
[2])),
1053 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1054 offsetof(struct __sk_buff
, cb
[2]) + 1),
1055 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1056 offsetof(struct __sk_buff
, cb
[2]) + 2),
1057 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1058 offsetof(struct __sk_buff
, cb
[2]) + 3),
1059 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1060 offsetof(struct __sk_buff
, cb
[3])),
1061 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1062 offsetof(struct __sk_buff
, cb
[3]) + 1),
1063 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1064 offsetof(struct __sk_buff
, cb
[3]) + 2),
1065 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1066 offsetof(struct __sk_buff
, cb
[3]) + 3),
1067 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1068 offsetof(struct __sk_buff
, cb
[4])),
1069 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1070 offsetof(struct __sk_buff
, cb
[4]) + 1),
1071 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1072 offsetof(struct __sk_buff
, cb
[4]) + 2),
1073 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1074 offsetof(struct __sk_buff
, cb
[4]) + 3),
1080 "__sk_buff->hash, offset 0, byte store not permitted",
1082 BPF_MOV64_IMM(BPF_REG_0
, 0),
1083 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1084 offsetof(struct __sk_buff
, hash
)),
1087 .errstr
= "invalid bpf_context access",
1091 "__sk_buff->tc_index, offset 3, byte store not permitted",
1093 BPF_MOV64_IMM(BPF_REG_0
, 0),
1094 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1095 offsetof(struct __sk_buff
, tc_index
) + 3),
1098 .errstr
= "invalid bpf_context access",
1102 "check skb->hash byte load permitted",
1104 BPF_MOV64_IMM(BPF_REG_0
, 0),
1105 #if __BYTE_ORDER == __LITTLE_ENDIAN
1106 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1107 offsetof(struct __sk_buff
, hash
)),
1109 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1110 offsetof(struct __sk_buff
, hash
) + 3),
1117 "check skb->hash byte load not permitted 1",
1119 BPF_MOV64_IMM(BPF_REG_0
, 0),
1120 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1121 offsetof(struct __sk_buff
, hash
) + 1),
1124 .errstr
= "invalid bpf_context access",
1128 "check skb->hash byte load not permitted 2",
1130 BPF_MOV64_IMM(BPF_REG_0
, 0),
1131 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1132 offsetof(struct __sk_buff
, hash
) + 2),
1135 .errstr
= "invalid bpf_context access",
1139 "check skb->hash byte load not permitted 3",
1141 BPF_MOV64_IMM(BPF_REG_0
, 0),
1142 #if __BYTE_ORDER == __LITTLE_ENDIAN
1143 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1144 offsetof(struct __sk_buff
, hash
) + 3),
1146 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1147 offsetof(struct __sk_buff
, hash
)),
1151 .errstr
= "invalid bpf_context access",
1155 "check cb access: byte, wrong type",
1157 BPF_MOV64_IMM(BPF_REG_0
, 0),
1158 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1159 offsetof(struct __sk_buff
, cb
[0])),
1162 .errstr
= "invalid bpf_context access",
1164 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1167 "check cb access: half",
1169 BPF_MOV64_IMM(BPF_REG_0
, 0),
1170 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1171 offsetof(struct __sk_buff
, cb
[0])),
1172 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1173 offsetof(struct __sk_buff
, cb
[0]) + 2),
1174 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1175 offsetof(struct __sk_buff
, cb
[1])),
1176 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1177 offsetof(struct __sk_buff
, cb
[1]) + 2),
1178 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1179 offsetof(struct __sk_buff
, cb
[2])),
1180 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1181 offsetof(struct __sk_buff
, cb
[2]) + 2),
1182 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1183 offsetof(struct __sk_buff
, cb
[3])),
1184 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1185 offsetof(struct __sk_buff
, cb
[3]) + 2),
1186 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1187 offsetof(struct __sk_buff
, cb
[4])),
1188 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1189 offsetof(struct __sk_buff
, cb
[4]) + 2),
1190 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1191 offsetof(struct __sk_buff
, cb
[0])),
1192 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1193 offsetof(struct __sk_buff
, cb
[0]) + 2),
1194 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1195 offsetof(struct __sk_buff
, cb
[1])),
1196 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1197 offsetof(struct __sk_buff
, cb
[1]) + 2),
1198 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1199 offsetof(struct __sk_buff
, cb
[2])),
1200 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1201 offsetof(struct __sk_buff
, cb
[2]) + 2),
1202 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1203 offsetof(struct __sk_buff
, cb
[3])),
1204 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1205 offsetof(struct __sk_buff
, cb
[3]) + 2),
1206 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1207 offsetof(struct __sk_buff
, cb
[4])),
1208 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1209 offsetof(struct __sk_buff
, cb
[4]) + 2),
1215 "check cb access: half, unaligned",
1217 BPF_MOV64_IMM(BPF_REG_0
, 0),
1218 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1219 offsetof(struct __sk_buff
, cb
[0]) + 1),
1222 .errstr
= "misaligned context access",
1224 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1227 "check __sk_buff->hash, offset 0, half store not permitted",
1229 BPF_MOV64_IMM(BPF_REG_0
, 0),
1230 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1231 offsetof(struct __sk_buff
, hash
)),
1234 .errstr
= "invalid bpf_context access",
1238 "check __sk_buff->tc_index, offset 2, half store not permitted",
1240 BPF_MOV64_IMM(BPF_REG_0
, 0),
1241 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1242 offsetof(struct __sk_buff
, tc_index
) + 2),
1245 .errstr
= "invalid bpf_context access",
1249 "check skb->hash half load permitted",
1251 BPF_MOV64_IMM(BPF_REG_0
, 0),
1252 #if __BYTE_ORDER == __LITTLE_ENDIAN
1253 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1254 offsetof(struct __sk_buff
, hash
)),
1256 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1257 offsetof(struct __sk_buff
, hash
) + 2),
1264 "check skb->hash half load not permitted",
1266 BPF_MOV64_IMM(BPF_REG_0
, 0),
1267 #if __BYTE_ORDER == __LITTLE_ENDIAN
1268 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1269 offsetof(struct __sk_buff
, hash
) + 2),
1271 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1272 offsetof(struct __sk_buff
, hash
)),
1276 .errstr
= "invalid bpf_context access",
1280 "check cb access: half, wrong type",
1282 BPF_MOV64_IMM(BPF_REG_0
, 0),
1283 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1284 offsetof(struct __sk_buff
, cb
[0])),
1287 .errstr
= "invalid bpf_context access",
1289 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1292 "check cb access: word",
1294 BPF_MOV64_IMM(BPF_REG_0
, 0),
1295 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1296 offsetof(struct __sk_buff
, cb
[0])),
1297 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1298 offsetof(struct __sk_buff
, cb
[1])),
1299 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1300 offsetof(struct __sk_buff
, cb
[2])),
1301 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1302 offsetof(struct __sk_buff
, cb
[3])),
1303 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1304 offsetof(struct __sk_buff
, cb
[4])),
1305 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1306 offsetof(struct __sk_buff
, cb
[0])),
1307 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1308 offsetof(struct __sk_buff
, cb
[1])),
1309 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1310 offsetof(struct __sk_buff
, cb
[2])),
1311 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1312 offsetof(struct __sk_buff
, cb
[3])),
1313 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1314 offsetof(struct __sk_buff
, cb
[4])),
1320 "check cb access: word, unaligned 1",
1322 BPF_MOV64_IMM(BPF_REG_0
, 0),
1323 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1324 offsetof(struct __sk_buff
, cb
[0]) + 2),
1327 .errstr
= "misaligned context access",
1329 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1332 "check cb access: word, unaligned 2",
1334 BPF_MOV64_IMM(BPF_REG_0
, 0),
1335 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1336 offsetof(struct __sk_buff
, cb
[4]) + 1),
1339 .errstr
= "misaligned context access",
1341 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1344 "check cb access: word, unaligned 3",
1346 BPF_MOV64_IMM(BPF_REG_0
, 0),
1347 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1348 offsetof(struct __sk_buff
, cb
[4]) + 2),
1351 .errstr
= "misaligned context access",
1353 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1356 "check cb access: word, unaligned 4",
1358 BPF_MOV64_IMM(BPF_REG_0
, 0),
1359 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1360 offsetof(struct __sk_buff
, cb
[4]) + 3),
1363 .errstr
= "misaligned context access",
1365 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1368 "check cb access: double",
1370 BPF_MOV64_IMM(BPF_REG_0
, 0),
1371 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1372 offsetof(struct __sk_buff
, cb
[0])),
1373 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1374 offsetof(struct __sk_buff
, cb
[2])),
1375 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1376 offsetof(struct __sk_buff
, cb
[0])),
1377 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1378 offsetof(struct __sk_buff
, cb
[2])),
1384 "check cb access: double, unaligned 1",
1386 BPF_MOV64_IMM(BPF_REG_0
, 0),
1387 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1388 offsetof(struct __sk_buff
, cb
[1])),
1391 .errstr
= "misaligned context access",
1393 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1396 "check cb access: double, unaligned 2",
1398 BPF_MOV64_IMM(BPF_REG_0
, 0),
1399 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1400 offsetof(struct __sk_buff
, cb
[3])),
1403 .errstr
= "misaligned context access",
1405 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1408 "check cb access: double, oob 1",
1410 BPF_MOV64_IMM(BPF_REG_0
, 0),
1411 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1412 offsetof(struct __sk_buff
, cb
[4])),
1415 .errstr
= "invalid bpf_context access",
1419 "check cb access: double, oob 2",
1421 BPF_MOV64_IMM(BPF_REG_0
, 0),
1422 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1423 offsetof(struct __sk_buff
, cb
[4])),
1426 .errstr
= "invalid bpf_context access",
1430 "check __sk_buff->ifindex dw store not permitted",
1432 BPF_MOV64_IMM(BPF_REG_0
, 0),
1433 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1434 offsetof(struct __sk_buff
, ifindex
)),
1437 .errstr
= "invalid bpf_context access",
1441 "check __sk_buff->ifindex dw load not permitted",
1443 BPF_MOV64_IMM(BPF_REG_0
, 0),
1444 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1445 offsetof(struct __sk_buff
, ifindex
)),
1448 .errstr
= "invalid bpf_context access",
1452 "check cb access: double, wrong type",
1454 BPF_MOV64_IMM(BPF_REG_0
, 0),
1455 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1456 offsetof(struct __sk_buff
, cb
[0])),
1459 .errstr
= "invalid bpf_context access",
1461 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1464 "check out of range skb->cb access",
1466 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1467 offsetof(struct __sk_buff
, cb
[0]) + 256),
1470 .errstr
= "invalid bpf_context access",
1471 .errstr_unpriv
= "",
1473 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
1476 "write skb fields from socket prog",
1478 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1479 offsetof(struct __sk_buff
, cb
[4])),
1480 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1481 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1482 offsetof(struct __sk_buff
, mark
)),
1483 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1484 offsetof(struct __sk_buff
, tc_index
)),
1485 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1486 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1487 offsetof(struct __sk_buff
, cb
[0])),
1488 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1489 offsetof(struct __sk_buff
, cb
[2])),
1493 .errstr_unpriv
= "R1 leaks addr",
1494 .result_unpriv
= REJECT
,
1497 "write skb fields from tc_cls_act prog",
1499 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1500 offsetof(struct __sk_buff
, cb
[0])),
1501 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1502 offsetof(struct __sk_buff
, mark
)),
1503 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1504 offsetof(struct __sk_buff
, tc_index
)),
1505 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1506 offsetof(struct __sk_buff
, tc_index
)),
1507 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1508 offsetof(struct __sk_buff
, cb
[3])),
1511 .errstr_unpriv
= "",
1512 .result_unpriv
= REJECT
,
1514 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1517 "PTR_TO_STACK store/load",
1519 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1520 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1521 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1522 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1528 "PTR_TO_STACK store/load - bad alignment on off",
1530 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1531 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1532 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1533 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1537 .errstr
= "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1538 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1541 "PTR_TO_STACK store/load - bad alignment on reg",
1543 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1544 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1545 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1546 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1550 .errstr
= "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1551 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1554 "PTR_TO_STACK store/load - out of bounds low",
1556 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1557 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -80000),
1558 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1559 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1563 .errstr
= "invalid stack off=-79992 size=8",
1566 "PTR_TO_STACK store/load - out of bounds high",
1568 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1569 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1570 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1571 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1575 .errstr
= "invalid stack off=0 size=8",
1578 "unpriv: return pointer",
1580 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
1584 .result_unpriv
= REJECT
,
1585 .errstr_unpriv
= "R0 leaks addr",
1588 "unpriv: add const to pointer",
1590 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
1591 BPF_MOV64_IMM(BPF_REG_0
, 0),
1597 "unpriv: add pointer to pointer",
1599 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
1600 BPF_MOV64_IMM(BPF_REG_0
, 0),
1604 .result_unpriv
= REJECT
,
1605 .errstr_unpriv
= "R1 pointer += pointer",
1608 "unpriv: neg pointer",
1610 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
1611 BPF_MOV64_IMM(BPF_REG_0
, 0),
1615 .result_unpriv
= REJECT
,
1616 .errstr_unpriv
= "R1 pointer arithmetic",
1619 "unpriv: cmp pointer with const",
1621 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
1622 BPF_MOV64_IMM(BPF_REG_0
, 0),
1626 .result_unpriv
= REJECT
,
1627 .errstr_unpriv
= "R1 pointer comparison",
1630 "unpriv: cmp pointer with pointer",
1632 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
1633 BPF_MOV64_IMM(BPF_REG_0
, 0),
1637 .result_unpriv
= REJECT
,
1638 .errstr_unpriv
= "R10 pointer comparison",
1641 "unpriv: check that printk is disallowed",
1643 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1644 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1645 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1646 BPF_MOV64_IMM(BPF_REG_2
, 8),
1647 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1648 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1649 BPF_FUNC_trace_printk
),
1650 BPF_MOV64_IMM(BPF_REG_0
, 0),
1653 .errstr_unpriv
= "unknown func bpf_trace_printk#6",
1654 .result_unpriv
= REJECT
,
1658 "unpriv: pass pointer to helper function",
1660 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1661 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1662 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1663 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1664 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
1665 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
1666 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1667 BPF_FUNC_map_update_elem
),
1668 BPF_MOV64_IMM(BPF_REG_0
, 0),
1671 .fixup_map1
= { 3 },
1672 .errstr_unpriv
= "R4 leaks addr",
1673 .result_unpriv
= REJECT
,
1677 "unpriv: indirectly pass pointer on stack to helper function",
1679 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1680 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1681 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1682 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1683 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1684 BPF_FUNC_map_lookup_elem
),
1685 BPF_MOV64_IMM(BPF_REG_0
, 0),
1688 .fixup_map1
= { 3 },
1689 .errstr
= "invalid indirect read from stack off -8+0 size 8",
1693 "unpriv: mangle pointer on stack 1",
1695 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1696 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
1697 BPF_MOV64_IMM(BPF_REG_0
, 0),
1700 .errstr_unpriv
= "attempt to corrupt spilled",
1701 .result_unpriv
= REJECT
,
1705 "unpriv: mangle pointer on stack 2",
1707 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1708 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
1709 BPF_MOV64_IMM(BPF_REG_0
, 0),
1712 .errstr_unpriv
= "attempt to corrupt spilled",
1713 .result_unpriv
= REJECT
,
1717 "unpriv: read pointer from stack in small chunks",
1719 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1720 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
1721 BPF_MOV64_IMM(BPF_REG_0
, 0),
1724 .errstr
= "invalid size",
1728 "unpriv: write pointer into ctx",
1730 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
1731 BPF_MOV64_IMM(BPF_REG_0
, 0),
1734 .errstr_unpriv
= "R1 leaks addr",
1735 .result_unpriv
= REJECT
,
1736 .errstr
= "invalid bpf_context access",
1740 "unpriv: spill/fill of ctx",
1742 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1743 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1744 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
1745 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
1746 BPF_MOV64_IMM(BPF_REG_0
, 0),
1752 "unpriv: spill/fill of ctx 2",
1754 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1755 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1756 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
1757 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
1758 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1759 BPF_FUNC_get_hash_recalc
),
1763 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1766 "unpriv: spill/fill of ctx 3",
1768 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1770 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
1771 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
1772 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
1773 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1774 BPF_FUNC_get_hash_recalc
),
1778 .errstr
= "R1 type=fp expected=ctx",
1779 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1782 "unpriv: spill/fill of ctx 4",
1784 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1785 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1786 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
1787 BPF_MOV64_IMM(BPF_REG_0
, 1),
1788 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_DW
, BPF_REG_10
,
1790 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
1791 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1792 BPF_FUNC_get_hash_recalc
),
1796 .errstr
= "R1 type=inv expected=ctx",
1797 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1800 "unpriv: spill/fill of different pointers stx",
1802 BPF_MOV64_IMM(BPF_REG_3
, 42),
1803 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1804 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1805 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
1806 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1807 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
1808 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
1809 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
1810 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
1811 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
1812 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
1813 offsetof(struct __sk_buff
, mark
)),
1814 BPF_MOV64_IMM(BPF_REG_0
, 0),
1818 .errstr
= "same insn cannot be used with different pointers",
1819 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1822 "unpriv: spill/fill of different pointers ldx",
1824 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1825 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1826 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
1827 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1828 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
,
1829 -(__s32
)offsetof(struct bpf_perf_event_data
,
1830 sample_period
) - 8),
1831 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
1832 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
1833 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
1834 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
1835 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
,
1836 offsetof(struct bpf_perf_event_data
,
1838 BPF_MOV64_IMM(BPF_REG_0
, 0),
1842 .errstr
= "same insn cannot be used with different pointers",
1843 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
1846 "unpriv: write pointer into map elem value",
1848 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1849 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1850 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1851 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1852 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1853 BPF_FUNC_map_lookup_elem
),
1854 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
1855 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
1858 .fixup_map1
= { 3 },
1859 .errstr_unpriv
= "R0 leaks addr",
1860 .result_unpriv
= REJECT
,
1864 "unpriv: partial copy of pointer",
1866 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
1867 BPF_MOV64_IMM(BPF_REG_0
, 0),
1870 .errstr_unpriv
= "R10 partial copy",
1871 .result_unpriv
= REJECT
,
1875 "unpriv: pass pointer to tail_call",
1877 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1878 BPF_LD_MAP_FD(BPF_REG_2
, 0),
1879 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1880 BPF_FUNC_tail_call
),
1881 BPF_MOV64_IMM(BPF_REG_0
, 0),
1884 .fixup_prog
= { 1 },
1885 .errstr_unpriv
= "R3 leaks addr into helper",
1886 .result_unpriv
= REJECT
,
1890 "unpriv: cmp map pointer with zero",
1892 BPF_MOV64_IMM(BPF_REG_1
, 0),
1893 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1894 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
1895 BPF_MOV64_IMM(BPF_REG_0
, 0),
1898 .fixup_map1
= { 1 },
1899 .errstr_unpriv
= "R1 pointer comparison",
1900 .result_unpriv
= REJECT
,
1904 "unpriv: write into frame pointer",
1906 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
1907 BPF_MOV64_IMM(BPF_REG_0
, 0),
1910 .errstr
= "frame pointer is read only",
1914 "unpriv: spill/fill frame pointer",
1916 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1917 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1918 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
1919 BPF_LDX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, 0),
1920 BPF_MOV64_IMM(BPF_REG_0
, 0),
1923 .errstr
= "frame pointer is read only",
1927 "unpriv: cmp of frame pointer",
1929 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
1930 BPF_MOV64_IMM(BPF_REG_0
, 0),
1933 .errstr_unpriv
= "R10 pointer comparison",
1934 .result_unpriv
= REJECT
,
1938 "unpriv: adding of fp",
1940 BPF_MOV64_IMM(BPF_REG_0
, 0),
1941 BPF_MOV64_IMM(BPF_REG_1
, 0),
1942 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
1943 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, -8),
1949 "unpriv: cmp of stack pointer",
1951 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1952 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1953 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
1954 BPF_MOV64_IMM(BPF_REG_0
, 0),
1957 .errstr_unpriv
= "R2 pointer comparison",
1958 .result_unpriv
= REJECT
,
1962 "stack pointer arithmetic",
1964 BPF_MOV64_IMM(BPF_REG_1
, 4),
1965 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
1966 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_10
),
1967 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
1968 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
1969 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
1970 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_1
),
1971 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
1972 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
1973 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
1974 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
1975 BPF_MOV64_IMM(BPF_REG_0
, 0),
1981 "raw_stack: no skb_load_bytes",
1983 BPF_MOV64_IMM(BPF_REG_2
, 4),
1984 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
1985 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
1986 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
1987 BPF_MOV64_IMM(BPF_REG_4
, 8),
1988 /* Call to skb_load_bytes() omitted. */
1989 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
1993 .errstr
= "invalid read from stack off -8+0 size 8",
1994 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1997 "raw_stack: skb_load_bytes, negative len",
1999 BPF_MOV64_IMM(BPF_REG_2
, 4),
2000 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2001 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2002 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2003 BPF_MOV64_IMM(BPF_REG_4
, -8),
2004 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2005 BPF_FUNC_skb_load_bytes
),
2006 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2010 .errstr
= "R4 min value is negative",
2011 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2014 "raw_stack: skb_load_bytes, negative len 2",
2016 BPF_MOV64_IMM(BPF_REG_2
, 4),
2017 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2018 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2019 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2020 BPF_MOV64_IMM(BPF_REG_4
, ~0),
2021 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2022 BPF_FUNC_skb_load_bytes
),
2023 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2027 .errstr
= "R4 min value is negative",
2028 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2031 "raw_stack: skb_load_bytes, zero len",
2033 BPF_MOV64_IMM(BPF_REG_2
, 4),
2034 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2035 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2036 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2037 BPF_MOV64_IMM(BPF_REG_4
, 0),
2038 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2039 BPF_FUNC_skb_load_bytes
),
2040 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2044 .errstr
= "invalid stack type R3",
2045 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2048 "raw_stack: skb_load_bytes, no init",
2050 BPF_MOV64_IMM(BPF_REG_2
, 4),
2051 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2052 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2053 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2054 BPF_MOV64_IMM(BPF_REG_4
, 8),
2055 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2056 BPF_FUNC_skb_load_bytes
),
2057 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2061 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2064 "raw_stack: skb_load_bytes, init",
2066 BPF_MOV64_IMM(BPF_REG_2
, 4),
2067 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2068 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2069 BPF_ST_MEM(BPF_DW
, BPF_REG_6
, 0, 0xcafe),
2070 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2071 BPF_MOV64_IMM(BPF_REG_4
, 8),
2072 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2073 BPF_FUNC_skb_load_bytes
),
2074 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2078 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2081 "raw_stack: skb_load_bytes, spilled regs around bounds",
2083 BPF_MOV64_IMM(BPF_REG_2
, 4),
2084 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2085 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2086 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2087 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2088 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2089 BPF_MOV64_IMM(BPF_REG_4
, 8),
2090 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2091 BPF_FUNC_skb_load_bytes
),
2092 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2093 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2094 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2095 offsetof(struct __sk_buff
, mark
)),
2096 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2097 offsetof(struct __sk_buff
, priority
)),
2098 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2102 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2105 "raw_stack: skb_load_bytes, spilled regs corruption",
2107 BPF_MOV64_IMM(BPF_REG_2
, 4),
2108 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2109 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2110 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2111 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2112 BPF_MOV64_IMM(BPF_REG_4
, 8),
2113 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2114 BPF_FUNC_skb_load_bytes
),
2115 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2116 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2117 offsetof(struct __sk_buff
, mark
)),
2121 .errstr
= "R0 invalid mem access 'inv'",
2122 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2125 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2127 BPF_MOV64_IMM(BPF_REG_2
, 4),
2128 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2129 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2130 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2131 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2132 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2133 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2134 BPF_MOV64_IMM(BPF_REG_4
, 8),
2135 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2136 BPF_FUNC_skb_load_bytes
),
2137 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2138 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2139 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2140 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2141 offsetof(struct __sk_buff
, mark
)),
2142 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2143 offsetof(struct __sk_buff
, priority
)),
2144 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2145 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_3
,
2146 offsetof(struct __sk_buff
, pkt_type
)),
2147 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2151 .errstr
= "R3 invalid mem access 'inv'",
2152 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2155 "raw_stack: skb_load_bytes, spilled regs + data",
2157 BPF_MOV64_IMM(BPF_REG_2
, 4),
2158 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2159 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2160 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2161 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2162 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2163 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2164 BPF_MOV64_IMM(BPF_REG_4
, 8),
2165 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2166 BPF_FUNC_skb_load_bytes
),
2167 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2168 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2169 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2170 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2171 offsetof(struct __sk_buff
, mark
)),
2172 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2173 offsetof(struct __sk_buff
, priority
)),
2174 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2175 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2179 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2182 "raw_stack: skb_load_bytes, invalid access 1",
2184 BPF_MOV64_IMM(BPF_REG_2
, 4),
2185 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2186 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -513),
2187 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2188 BPF_MOV64_IMM(BPF_REG_4
, 8),
2189 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2190 BPF_FUNC_skb_load_bytes
),
2191 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2195 .errstr
= "invalid stack type R3 off=-513 access_size=8",
2196 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2199 "raw_stack: skb_load_bytes, invalid access 2",
2201 BPF_MOV64_IMM(BPF_REG_2
, 4),
2202 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2203 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2204 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2205 BPF_MOV64_IMM(BPF_REG_4
, 8),
2206 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2207 BPF_FUNC_skb_load_bytes
),
2208 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2212 .errstr
= "invalid stack type R3 off=-1 access_size=8",
2213 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2216 "raw_stack: skb_load_bytes, invalid access 3",
2218 BPF_MOV64_IMM(BPF_REG_2
, 4),
2219 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2220 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 0xffffffff),
2221 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2222 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2223 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2224 BPF_FUNC_skb_load_bytes
),
2225 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2229 .errstr
= "R4 min value is negative",
2230 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2233 "raw_stack: skb_load_bytes, invalid access 4",
2235 BPF_MOV64_IMM(BPF_REG_2
, 4),
2236 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2237 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2238 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2239 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2240 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2241 BPF_FUNC_skb_load_bytes
),
2242 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2246 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2247 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2250 "raw_stack: skb_load_bytes, invalid access 5",
2252 BPF_MOV64_IMM(BPF_REG_2
, 4),
2253 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2254 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2255 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2256 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2257 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2258 BPF_FUNC_skb_load_bytes
),
2259 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2263 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2264 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2267 "raw_stack: skb_load_bytes, invalid access 6",
2269 BPF_MOV64_IMM(BPF_REG_2
, 4),
2270 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2271 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2272 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2273 BPF_MOV64_IMM(BPF_REG_4
, 0),
2274 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2275 BPF_FUNC_skb_load_bytes
),
2276 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2280 .errstr
= "invalid stack type R3 off=-512 access_size=0",
2281 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2284 "raw_stack: skb_load_bytes, large access",
2286 BPF_MOV64_IMM(BPF_REG_2
, 4),
2287 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2288 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2289 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2290 BPF_MOV64_IMM(BPF_REG_4
, 512),
2291 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2292 BPF_FUNC_skb_load_bytes
),
2293 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2297 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2300 "direct packet access: test1",
2302 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2303 offsetof(struct __sk_buff
, data
)),
2304 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2305 offsetof(struct __sk_buff
, data_end
)),
2306 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2307 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2308 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2309 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2310 BPF_MOV64_IMM(BPF_REG_0
, 0),
2314 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2317 "direct packet access: test2",
2319 BPF_MOV64_IMM(BPF_REG_0
, 1),
2320 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
2321 offsetof(struct __sk_buff
, data_end
)),
2322 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2323 offsetof(struct __sk_buff
, data
)),
2324 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2325 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 14),
2326 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 15),
2327 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 7),
2328 BPF_LDX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_3
, 12),
2329 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_4
, 14),
2330 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2331 offsetof(struct __sk_buff
, data
)),
2332 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_4
),
2333 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_1
),
2334 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 49),
2335 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 49),
2336 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_2
),
2337 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_3
),
2338 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2339 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
2340 offsetof(struct __sk_buff
, data_end
)),
2341 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
2342 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_3
, 4),
2343 BPF_MOV64_IMM(BPF_REG_0
, 0),
2347 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2350 "direct packet access: test3",
2352 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2353 offsetof(struct __sk_buff
, data
)),
2354 BPF_MOV64_IMM(BPF_REG_0
, 0),
2357 .errstr
= "invalid bpf_context access off=76",
2359 .prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
,
2362 "direct packet access: test4 (write)",
2364 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2365 offsetof(struct __sk_buff
, data
)),
2366 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2367 offsetof(struct __sk_buff
, data_end
)),
2368 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2369 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2370 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2371 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2372 BPF_MOV64_IMM(BPF_REG_0
, 0),
2376 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2379 "direct packet access: test5 (pkt_end >= reg, good access)",
2381 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2382 offsetof(struct __sk_buff
, data
)),
2383 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2384 offsetof(struct __sk_buff
, data_end
)),
2385 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2386 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2387 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2388 BPF_MOV64_IMM(BPF_REG_0
, 1),
2390 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2391 BPF_MOV64_IMM(BPF_REG_0
, 0),
2395 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2398 "direct packet access: test6 (pkt_end >= reg, bad access)",
2400 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2401 offsetof(struct __sk_buff
, data
)),
2402 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2403 offsetof(struct __sk_buff
, data_end
)),
2404 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2405 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2406 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2407 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2408 BPF_MOV64_IMM(BPF_REG_0
, 1),
2410 BPF_MOV64_IMM(BPF_REG_0
, 0),
2413 .errstr
= "invalid access to packet",
2415 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2418 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2420 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2421 offsetof(struct __sk_buff
, data
)),
2422 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2423 offsetof(struct __sk_buff
, data_end
)),
2424 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2425 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2426 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2427 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2428 BPF_MOV64_IMM(BPF_REG_0
, 1),
2430 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2431 BPF_MOV64_IMM(BPF_REG_0
, 0),
2434 .errstr
= "invalid access to packet",
2436 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2439 "direct packet access: test8 (double test, variant 1)",
2441 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2442 offsetof(struct __sk_buff
, data
)),
2443 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2444 offsetof(struct __sk_buff
, data_end
)),
2445 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2446 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2447 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 4),
2448 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2449 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2450 BPF_MOV64_IMM(BPF_REG_0
, 1),
2452 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2453 BPF_MOV64_IMM(BPF_REG_0
, 0),
2457 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2460 "direct packet access: test9 (double test, variant 2)",
2462 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2463 offsetof(struct __sk_buff
, data
)),
2464 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2465 offsetof(struct __sk_buff
, data_end
)),
2466 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2467 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2468 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2469 BPF_MOV64_IMM(BPF_REG_0
, 1),
2471 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2472 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2473 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2474 BPF_MOV64_IMM(BPF_REG_0
, 0),
2478 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2481 "direct packet access: test10 (write invalid)",
2483 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2484 offsetof(struct __sk_buff
, data
)),
2485 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2486 offsetof(struct __sk_buff
, data_end
)),
2487 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2488 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2489 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
2490 BPF_MOV64_IMM(BPF_REG_0
, 0),
2492 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2493 BPF_MOV64_IMM(BPF_REG_0
, 0),
2496 .errstr
= "invalid access to packet",
2498 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2501 "direct packet access: test11 (shift, good access)",
2503 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2504 offsetof(struct __sk_buff
, data
)),
2505 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2506 offsetof(struct __sk_buff
, data_end
)),
2507 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2508 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2509 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2510 BPF_MOV64_IMM(BPF_REG_3
, 144),
2511 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2512 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2513 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 3),
2514 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2515 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2516 BPF_MOV64_IMM(BPF_REG_0
, 1),
2518 BPF_MOV64_IMM(BPF_REG_0
, 0),
2522 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2525 "direct packet access: test12 (and, good access)",
2527 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2528 offsetof(struct __sk_buff
, data
)),
2529 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2530 offsetof(struct __sk_buff
, data_end
)),
2531 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2532 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2533 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2534 BPF_MOV64_IMM(BPF_REG_3
, 144),
2535 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2536 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2537 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
2538 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2539 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2540 BPF_MOV64_IMM(BPF_REG_0
, 1),
2542 BPF_MOV64_IMM(BPF_REG_0
, 0),
2546 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2549 "direct packet access: test13 (branches, good access)",
2551 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2552 offsetof(struct __sk_buff
, data
)),
2553 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2554 offsetof(struct __sk_buff
, data_end
)),
2555 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2556 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2557 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 13),
2558 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2559 offsetof(struct __sk_buff
, mark
)),
2560 BPF_MOV64_IMM(BPF_REG_4
, 1),
2561 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_4
, 2),
2562 BPF_MOV64_IMM(BPF_REG_3
, 14),
2563 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
2564 BPF_MOV64_IMM(BPF_REG_3
, 24),
2565 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2566 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2567 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
2568 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2569 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2570 BPF_MOV64_IMM(BPF_REG_0
, 1),
2572 BPF_MOV64_IMM(BPF_REG_0
, 0),
2576 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2579 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2581 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2582 offsetof(struct __sk_buff
, data
)),
2583 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2584 offsetof(struct __sk_buff
, data_end
)),
2585 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2586 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2587 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
2588 BPF_MOV64_IMM(BPF_REG_5
, 12),
2589 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 4),
2590 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2591 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2592 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_6
, 0),
2593 BPF_MOV64_IMM(BPF_REG_0
, 1),
2595 BPF_MOV64_IMM(BPF_REG_0
, 0),
2599 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2602 "direct packet access: test15 (spill with xadd)",
2604 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2605 offsetof(struct __sk_buff
, data
)),
2606 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2607 offsetof(struct __sk_buff
, data_end
)),
2608 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2609 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2610 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2611 BPF_MOV64_IMM(BPF_REG_5
, 4096),
2612 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
2613 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
2614 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
2615 BPF_STX_XADD(BPF_DW
, BPF_REG_4
, BPF_REG_5
, 0),
2616 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
2617 BPF_STX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_5
, 0),
2618 BPF_MOV64_IMM(BPF_REG_0
, 0),
2621 .errstr
= "R2 invalid mem access 'inv'",
2623 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2626 "direct packet access: test16 (arith on data_end)",
2628 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2629 offsetof(struct __sk_buff
, data
)),
2630 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2631 offsetof(struct __sk_buff
, data_end
)),
2632 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2633 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2634 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 16),
2635 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2636 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2637 BPF_MOV64_IMM(BPF_REG_0
, 0),
2640 .errstr
= "invalid access to packet",
2642 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2645 "direct packet access: test17 (pruning, alignment)",
2647 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2648 offsetof(struct __sk_buff
, data
)),
2649 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2650 offsetof(struct __sk_buff
, data_end
)),
2651 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
2652 offsetof(struct __sk_buff
, mark
)),
2653 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2654 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 14),
2655 BPF_JMP_IMM(BPF_JGT
, BPF_REG_7
, 1, 4),
2656 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2657 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
, -4),
2658 BPF_MOV64_IMM(BPF_REG_0
, 0),
2660 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 1),
2663 .errstr
= "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2665 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2666 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
2669 "direct packet access: test18 (imm += pkt_ptr, 1)",
2671 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2672 offsetof(struct __sk_buff
, data
)),
2673 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2674 offsetof(struct __sk_buff
, data_end
)),
2675 BPF_MOV64_IMM(BPF_REG_0
, 8),
2676 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2677 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2678 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2679 BPF_MOV64_IMM(BPF_REG_0
, 0),
2683 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2686 "direct packet access: test19 (imm += pkt_ptr, 2)",
2688 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2689 offsetof(struct __sk_buff
, data
)),
2690 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2691 offsetof(struct __sk_buff
, data_end
)),
2692 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2693 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2694 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
2695 BPF_MOV64_IMM(BPF_REG_4
, 4),
2696 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2697 BPF_STX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_4
, 0),
2698 BPF_MOV64_IMM(BPF_REG_0
, 0),
2702 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2705 "direct packet access: test20 (x += pkt_ptr, 1)",
2707 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2708 offsetof(struct __sk_buff
, data
)),
2709 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2710 offsetof(struct __sk_buff
, data_end
)),
2711 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
2712 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
2713 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
2714 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0x7fff),
2715 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
2716 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2717 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
2718 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
2719 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
2720 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
2721 BPF_MOV64_IMM(BPF_REG_0
, 0),
2724 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2728 "direct packet access: test21 (x += pkt_ptr, 2)",
2730 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2731 offsetof(struct __sk_buff
, data
)),
2732 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2733 offsetof(struct __sk_buff
, data_end
)),
2734 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2735 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2736 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 9),
2737 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2738 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
2739 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
2740 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, 0x7fff),
2741 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2742 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
2743 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
2744 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
2745 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
2746 BPF_MOV64_IMM(BPF_REG_0
, 0),
2749 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2753 "direct packet access: test22 (x += pkt_ptr, 3)",
2755 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2756 offsetof(struct __sk_buff
, data
)),
2757 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2758 offsetof(struct __sk_buff
, data_end
)),
2759 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2760 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2761 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -8),
2762 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_3
, -16),
2763 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_10
, -16),
2764 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 11),
2765 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
2766 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2767 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
2768 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
2769 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_4
, 49),
2770 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2771 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
2772 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
2773 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
2774 BPF_MOV64_IMM(BPF_REG_2
, 1),
2775 BPF_STX_MEM(BPF_H
, BPF_REG_4
, BPF_REG_2
, 0),
2776 BPF_MOV64_IMM(BPF_REG_0
, 0),
2779 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2783 "direct packet access: test23 (x += pkt_ptr, 4)",
2785 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2786 offsetof(struct __sk_buff
, data
)),
2787 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2788 offsetof(struct __sk_buff
, data_end
)),
2789 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
2790 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
2791 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
2792 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xffff),
2793 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
2794 BPF_MOV64_IMM(BPF_REG_0
, 31),
2795 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
2796 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2797 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
2798 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0xffff - 1),
2799 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2800 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
2801 BPF_MOV64_IMM(BPF_REG_0
, 0),
2804 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2806 .errstr
= "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
2809 "direct packet access: test24 (x += pkt_ptr, 5)",
2811 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2812 offsetof(struct __sk_buff
, data
)),
2813 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2814 offsetof(struct __sk_buff
, data_end
)),
2815 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
2816 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
2817 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
2818 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xff),
2819 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
2820 BPF_MOV64_IMM(BPF_REG_0
, 64),
2821 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
2822 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2823 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
2824 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7fff - 1),
2825 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2826 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
2827 BPF_MOV64_IMM(BPF_REG_0
, 0),
2830 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2834 "direct packet access: test25 (marking on <, good access)",
2836 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2837 offsetof(struct __sk_buff
, data
)),
2838 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2839 offsetof(struct __sk_buff
, data_end
)),
2840 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2841 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2842 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 2),
2843 BPF_MOV64_IMM(BPF_REG_0
, 0),
2845 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2846 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
2849 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2852 "direct packet access: test26 (marking on <, bad access)",
2854 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2855 offsetof(struct __sk_buff
, data
)),
2856 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2857 offsetof(struct __sk_buff
, data_end
)),
2858 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2859 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2860 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 3),
2861 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2862 BPF_MOV64_IMM(BPF_REG_0
, 0),
2864 BPF_JMP_IMM(BPF_JA
, 0, 0, -3),
2867 .errstr
= "invalid access to packet",
2868 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2871 "direct packet access: test27 (marking on <=, good access)",
2873 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2874 offsetof(struct __sk_buff
, data
)),
2875 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2876 offsetof(struct __sk_buff
, data_end
)),
2877 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2878 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2879 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 1),
2880 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2881 BPF_MOV64_IMM(BPF_REG_0
, 1),
2885 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2888 "direct packet access: test28 (marking on <=, bad access)",
2890 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2891 offsetof(struct __sk_buff
, data
)),
2892 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2893 offsetof(struct __sk_buff
, data_end
)),
2894 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2895 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2896 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 2),
2897 BPF_MOV64_IMM(BPF_REG_0
, 1),
2899 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2900 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
2903 .errstr
= "invalid access to packet",
2904 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2907 "helper access to packet: test1, valid packet_ptr range",
2909 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2910 offsetof(struct xdp_md
, data
)),
2911 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2912 offsetof(struct xdp_md
, data_end
)),
2913 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
2914 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
2915 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
2916 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2917 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
2918 BPF_MOV64_IMM(BPF_REG_4
, 0),
2919 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2920 BPF_FUNC_map_update_elem
),
2921 BPF_MOV64_IMM(BPF_REG_0
, 0),
2924 .fixup_map1
= { 5 },
2925 .result_unpriv
= ACCEPT
,
2927 .prog_type
= BPF_PROG_TYPE_XDP
,
2930 "helper access to packet: test2, unchecked packet_ptr",
2932 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2933 offsetof(struct xdp_md
, data
)),
2934 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2935 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2936 BPF_FUNC_map_lookup_elem
),
2937 BPF_MOV64_IMM(BPF_REG_0
, 0),
2940 .fixup_map1
= { 1 },
2942 .errstr
= "invalid access to packet",
2943 .prog_type
= BPF_PROG_TYPE_XDP
,
2946 "helper access to packet: test3, variable add",
2948 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2949 offsetof(struct xdp_md
, data
)),
2950 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2951 offsetof(struct xdp_md
, data_end
)),
2952 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
2953 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
2954 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
2955 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
2956 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
2957 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
2958 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
2959 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
2960 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
2961 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2962 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
2963 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2964 BPF_FUNC_map_lookup_elem
),
2965 BPF_MOV64_IMM(BPF_REG_0
, 0),
2968 .fixup_map1
= { 11 },
2970 .prog_type
= BPF_PROG_TYPE_XDP
,
2973 "helper access to packet: test4, packet_ptr with bad range",
2975 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2976 offsetof(struct xdp_md
, data
)),
2977 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2978 offsetof(struct xdp_md
, data_end
)),
2979 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
2980 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
2981 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
2982 BPF_MOV64_IMM(BPF_REG_0
, 0),
2984 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2985 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2986 BPF_FUNC_map_lookup_elem
),
2987 BPF_MOV64_IMM(BPF_REG_0
, 0),
2990 .fixup_map1
= { 7 },
2992 .errstr
= "invalid access to packet",
2993 .prog_type
= BPF_PROG_TYPE_XDP
,
2996 "helper access to packet: test5, packet_ptr with too short range",
2998 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2999 offsetof(struct xdp_md
, data
)),
3000 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3001 offsetof(struct xdp_md
, data_end
)),
3002 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3003 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3004 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3005 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3006 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3007 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3008 BPF_FUNC_map_lookup_elem
),
3009 BPF_MOV64_IMM(BPF_REG_0
, 0),
3012 .fixup_map1
= { 6 },
3014 .errstr
= "invalid access to packet",
3015 .prog_type
= BPF_PROG_TYPE_XDP
,
3018 "helper access to packet: test6, cls valid packet_ptr range",
3020 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3021 offsetof(struct __sk_buff
, data
)),
3022 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3023 offsetof(struct __sk_buff
, data_end
)),
3024 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3025 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3026 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3027 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3028 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3029 BPF_MOV64_IMM(BPF_REG_4
, 0),
3030 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3031 BPF_FUNC_map_update_elem
),
3032 BPF_MOV64_IMM(BPF_REG_0
, 0),
3035 .fixup_map1
= { 5 },
3037 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3040 "helper access to packet: test7, cls unchecked packet_ptr",
3042 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3043 offsetof(struct __sk_buff
, data
)),
3044 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3045 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3046 BPF_FUNC_map_lookup_elem
),
3047 BPF_MOV64_IMM(BPF_REG_0
, 0),
3050 .fixup_map1
= { 1 },
3052 .errstr
= "invalid access to packet",
3053 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3056 "helper access to packet: test8, cls variable add",
3058 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3059 offsetof(struct __sk_buff
, data
)),
3060 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3061 offsetof(struct __sk_buff
, data_end
)),
3062 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3063 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3064 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3065 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3066 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3067 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3068 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3069 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3070 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3071 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3072 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3073 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3074 BPF_FUNC_map_lookup_elem
),
3075 BPF_MOV64_IMM(BPF_REG_0
, 0),
3078 .fixup_map1
= { 11 },
3080 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3083 "helper access to packet: test9, cls packet_ptr with bad range",
3085 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3086 offsetof(struct __sk_buff
, data
)),
3087 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3088 offsetof(struct __sk_buff
, data_end
)),
3089 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3090 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3091 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3092 BPF_MOV64_IMM(BPF_REG_0
, 0),
3094 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3095 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3096 BPF_FUNC_map_lookup_elem
),
3097 BPF_MOV64_IMM(BPF_REG_0
, 0),
3100 .fixup_map1
= { 7 },
3102 .errstr
= "invalid access to packet",
3103 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3106 "helper access to packet: test10, cls packet_ptr with too short range",
3108 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3109 offsetof(struct __sk_buff
, data
)),
3110 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3111 offsetof(struct __sk_buff
, data_end
)),
3112 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3113 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3114 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3115 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3116 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3117 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3118 BPF_FUNC_map_lookup_elem
),
3119 BPF_MOV64_IMM(BPF_REG_0
, 0),
3122 .fixup_map1
= { 6 },
3124 .errstr
= "invalid access to packet",
3125 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3128 "helper access to packet: test11, cls unsuitable helper 1",
3130 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3131 offsetof(struct __sk_buff
, data
)),
3132 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3133 offsetof(struct __sk_buff
, data_end
)),
3134 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3135 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3136 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 7),
3137 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_7
, 4),
3138 BPF_MOV64_IMM(BPF_REG_2
, 0),
3139 BPF_MOV64_IMM(BPF_REG_4
, 42),
3140 BPF_MOV64_IMM(BPF_REG_5
, 0),
3141 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3142 BPF_FUNC_skb_store_bytes
),
3143 BPF_MOV64_IMM(BPF_REG_0
, 0),
3147 .errstr
= "helper access to the packet",
3148 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3151 "helper access to packet: test12, cls unsuitable helper 2",
3153 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3154 offsetof(struct __sk_buff
, data
)),
3155 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3156 offsetof(struct __sk_buff
, data_end
)),
3157 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3158 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
3159 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_7
, 3),
3160 BPF_MOV64_IMM(BPF_REG_2
, 0),
3161 BPF_MOV64_IMM(BPF_REG_4
, 4),
3162 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3163 BPF_FUNC_skb_load_bytes
),
3164 BPF_MOV64_IMM(BPF_REG_0
, 0),
3168 .errstr
= "helper access to the packet",
3169 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3172 "helper access to packet: test13, cls helper ok",
3174 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3175 offsetof(struct __sk_buff
, data
)),
3176 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3177 offsetof(struct __sk_buff
, data_end
)),
3178 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3179 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3180 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3181 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3182 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3183 BPF_MOV64_IMM(BPF_REG_2
, 4),
3184 BPF_MOV64_IMM(BPF_REG_3
, 0),
3185 BPF_MOV64_IMM(BPF_REG_4
, 0),
3186 BPF_MOV64_IMM(BPF_REG_5
, 0),
3187 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3188 BPF_FUNC_csum_diff
),
3189 BPF_MOV64_IMM(BPF_REG_0
, 0),
3193 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3196 "helper access to packet: test14, cls helper ok sub",
3198 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3199 offsetof(struct __sk_buff
, data
)),
3200 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3201 offsetof(struct __sk_buff
, data_end
)),
3202 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3203 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3204 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3205 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3206 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 4),
3207 BPF_MOV64_IMM(BPF_REG_2
, 4),
3208 BPF_MOV64_IMM(BPF_REG_3
, 0),
3209 BPF_MOV64_IMM(BPF_REG_4
, 0),
3210 BPF_MOV64_IMM(BPF_REG_5
, 0),
3211 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3212 BPF_FUNC_csum_diff
),
3213 BPF_MOV64_IMM(BPF_REG_0
, 0),
3217 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3220 "helper access to packet: test15, cls helper fail sub",
3222 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3223 offsetof(struct __sk_buff
, data
)),
3224 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3225 offsetof(struct __sk_buff
, data_end
)),
3226 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3227 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3228 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3229 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3230 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 12),
3231 BPF_MOV64_IMM(BPF_REG_2
, 4),
3232 BPF_MOV64_IMM(BPF_REG_3
, 0),
3233 BPF_MOV64_IMM(BPF_REG_4
, 0),
3234 BPF_MOV64_IMM(BPF_REG_5
, 0),
3235 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3236 BPF_FUNC_csum_diff
),
3237 BPF_MOV64_IMM(BPF_REG_0
, 0),
3241 .errstr
= "invalid access to packet",
3242 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3245 "helper access to packet: test16, cls helper fail range 1",
3247 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3248 offsetof(struct __sk_buff
, data
)),
3249 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3250 offsetof(struct __sk_buff
, data_end
)),
3251 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3252 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3253 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3254 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3255 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3256 BPF_MOV64_IMM(BPF_REG_2
, 8),
3257 BPF_MOV64_IMM(BPF_REG_3
, 0),
3258 BPF_MOV64_IMM(BPF_REG_4
, 0),
3259 BPF_MOV64_IMM(BPF_REG_5
, 0),
3260 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3261 BPF_FUNC_csum_diff
),
3262 BPF_MOV64_IMM(BPF_REG_0
, 0),
3266 .errstr
= "invalid access to packet",
3267 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3270 "helper access to packet: test17, cls helper fail range 2",
3272 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3273 offsetof(struct __sk_buff
, data
)),
3274 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3275 offsetof(struct __sk_buff
, data_end
)),
3276 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3277 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3278 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3279 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3280 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3281 BPF_MOV64_IMM(BPF_REG_2
, -9),
3282 BPF_MOV64_IMM(BPF_REG_3
, 0),
3283 BPF_MOV64_IMM(BPF_REG_4
, 0),
3284 BPF_MOV64_IMM(BPF_REG_5
, 0),
3285 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3286 BPF_FUNC_csum_diff
),
3287 BPF_MOV64_IMM(BPF_REG_0
, 0),
3291 .errstr
= "R2 min value is negative",
3292 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3295 "helper access to packet: test18, cls helper fail range 3",
3297 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3298 offsetof(struct __sk_buff
, data
)),
3299 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3300 offsetof(struct __sk_buff
, data_end
)),
3301 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3302 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3303 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3304 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3305 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3306 BPF_MOV64_IMM(BPF_REG_2
, ~0),
3307 BPF_MOV64_IMM(BPF_REG_3
, 0),
3308 BPF_MOV64_IMM(BPF_REG_4
, 0),
3309 BPF_MOV64_IMM(BPF_REG_5
, 0),
3310 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3311 BPF_FUNC_csum_diff
),
3312 BPF_MOV64_IMM(BPF_REG_0
, 0),
3316 .errstr
= "R2 min value is negative",
3317 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3320 "helper access to packet: test19, cls helper fail range zero",
3322 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3323 offsetof(struct __sk_buff
, data
)),
3324 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3325 offsetof(struct __sk_buff
, data_end
)),
3326 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3327 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3328 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3329 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3330 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3331 BPF_MOV64_IMM(BPF_REG_2
, 0),
3332 BPF_MOV64_IMM(BPF_REG_3
, 0),
3333 BPF_MOV64_IMM(BPF_REG_4
, 0),
3334 BPF_MOV64_IMM(BPF_REG_5
, 0),
3335 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3336 BPF_FUNC_csum_diff
),
3337 BPF_MOV64_IMM(BPF_REG_0
, 0),
3341 .errstr
= "invalid access to packet",
3342 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3345 "helper access to packet: test20, pkt end as input",
3347 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3348 offsetof(struct __sk_buff
, data
)),
3349 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3350 offsetof(struct __sk_buff
, data_end
)),
3351 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3352 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3353 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3354 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3355 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
3356 BPF_MOV64_IMM(BPF_REG_2
, 4),
3357 BPF_MOV64_IMM(BPF_REG_3
, 0),
3358 BPF_MOV64_IMM(BPF_REG_4
, 0),
3359 BPF_MOV64_IMM(BPF_REG_5
, 0),
3360 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3361 BPF_FUNC_csum_diff
),
3362 BPF_MOV64_IMM(BPF_REG_0
, 0),
3366 .errstr
= "R1 type=pkt_end expected=fp",
3367 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3370 "helper access to packet: test21, wrong reg",
3372 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3373 offsetof(struct __sk_buff
, data
)),
3374 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3375 offsetof(struct __sk_buff
, data_end
)),
3376 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3377 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3378 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3379 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3380 BPF_MOV64_IMM(BPF_REG_2
, 4),
3381 BPF_MOV64_IMM(BPF_REG_3
, 0),
3382 BPF_MOV64_IMM(BPF_REG_4
, 0),
3383 BPF_MOV64_IMM(BPF_REG_5
, 0),
3384 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3385 BPF_FUNC_csum_diff
),
3386 BPF_MOV64_IMM(BPF_REG_0
, 0),
3390 .errstr
= "invalid access to packet",
3391 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3394 "valid map access into an array with a constant",
3396 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3397 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3398 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3399 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3400 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3401 BPF_FUNC_map_lookup_elem
),
3402 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3403 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3404 offsetof(struct test_val
, foo
)),
3407 .fixup_map2
= { 3 },
3408 .errstr_unpriv
= "R0 leaks addr",
3409 .result_unpriv
= REJECT
,
3413 "valid map access into an array with a register",
3415 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3416 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3417 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3418 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3419 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3420 BPF_FUNC_map_lookup_elem
),
3421 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3422 BPF_MOV64_IMM(BPF_REG_1
, 4),
3423 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3424 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3425 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3426 offsetof(struct test_val
, foo
)),
3429 .fixup_map2
= { 3 },
3430 .errstr_unpriv
= "R0 leaks addr",
3431 .result_unpriv
= REJECT
,
3433 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3436 "valid map access into an array with a variable",
3438 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3439 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3440 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3441 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3442 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3443 BPF_FUNC_map_lookup_elem
),
3444 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
3445 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3446 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 3),
3447 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3448 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3449 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3450 offsetof(struct test_val
, foo
)),
3453 .fixup_map2
= { 3 },
3454 .errstr_unpriv
= "R0 leaks addr",
3455 .result_unpriv
= REJECT
,
3457 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3460 "valid map access into an array with a signed variable",
3462 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3463 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3464 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3465 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3466 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3467 BPF_FUNC_map_lookup_elem
),
3468 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
3469 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3470 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 0xffffffff, 1),
3471 BPF_MOV32_IMM(BPF_REG_1
, 0),
3472 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
3473 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
3474 BPF_MOV32_IMM(BPF_REG_1
, 0),
3475 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3476 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3477 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3478 offsetof(struct test_val
, foo
)),
3481 .fixup_map2
= { 3 },
3482 .errstr_unpriv
= "R0 leaks addr",
3483 .result_unpriv
= REJECT
,
3485 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3488 "invalid map access into an array with a constant",
3490 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3491 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3492 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3493 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3494 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3495 BPF_FUNC_map_lookup_elem
),
3496 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3497 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, (MAX_ENTRIES
+ 1) << 2,
3498 offsetof(struct test_val
, foo
)),
3501 .fixup_map2
= { 3 },
3502 .errstr
= "invalid access to map value, value_size=48 off=48 size=8",
3506 "invalid map access into an array with a register",
3508 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3509 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3510 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3511 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3512 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3513 BPF_FUNC_map_lookup_elem
),
3514 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3515 BPF_MOV64_IMM(BPF_REG_1
, MAX_ENTRIES
+ 1),
3516 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3517 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3518 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3519 offsetof(struct test_val
, foo
)),
3522 .fixup_map2
= { 3 },
3523 .errstr
= "R0 min value is outside of the array range",
3525 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3528 "invalid map access into an array with a variable",
3530 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3531 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3532 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3533 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3534 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3535 BPF_FUNC_map_lookup_elem
),
3536 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3537 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3538 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3539 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3540 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3541 offsetof(struct test_val
, foo
)),
3544 .fixup_map2
= { 3 },
3545 .errstr
= "R0 unbounded memory access, make sure to bounds check any array access into a map",
3547 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3550 "invalid map access into an array with no floor check",
3552 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3553 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3554 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3555 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3556 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3557 BPF_FUNC_map_lookup_elem
),
3558 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
3559 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
3560 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
3561 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
3562 BPF_MOV32_IMM(BPF_REG_1
, 0),
3563 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3564 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3565 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3566 offsetof(struct test_val
, foo
)),
3569 .fixup_map2
= { 3 },
3570 .errstr_unpriv
= "R0 leaks addr",
3571 .errstr
= "R0 unbounded memory access",
3572 .result_unpriv
= REJECT
,
3574 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3577 "invalid map access into an array with a invalid max check",
3579 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3580 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3581 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3582 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3583 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3584 BPF_FUNC_map_lookup_elem
),
3585 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
3586 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3587 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
+ 1),
3588 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
3589 BPF_MOV32_IMM(BPF_REG_1
, 0),
3590 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3591 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3592 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3593 offsetof(struct test_val
, foo
)),
3596 .fixup_map2
= { 3 },
3597 .errstr_unpriv
= "R0 leaks addr",
3598 .errstr
= "invalid access to map value, value_size=48 off=44 size=8",
3599 .result_unpriv
= REJECT
,
3601 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3604 "invalid map access into an array with a invalid max check",
3606 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3607 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3608 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3609 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3610 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3611 BPF_FUNC_map_lookup_elem
),
3612 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
3613 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
3614 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3615 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3616 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3617 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3618 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3619 BPF_FUNC_map_lookup_elem
),
3620 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
3621 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
3622 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
3623 offsetof(struct test_val
, foo
)),
3626 .fixup_map2
= { 3, 11 },
3627 .errstr_unpriv
= "R0 pointer += pointer",
3628 .errstr
= "R0 invalid mem access 'inv'",
3629 .result_unpriv
= REJECT
,
3631 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3634 "multiple registers share map_lookup_elem result",
3636 BPF_MOV64_IMM(BPF_REG_1
, 10),
3637 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3638 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3639 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3640 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3641 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3642 BPF_FUNC_map_lookup_elem
),
3643 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3644 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3645 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3648 .fixup_map1
= { 4 },
3650 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3653 "alu ops on ptr_to_map_value_or_null, 1",
3655 BPF_MOV64_IMM(BPF_REG_1
, 10),
3656 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3657 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3658 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3659 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3660 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3661 BPF_FUNC_map_lookup_elem
),
3662 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3663 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -2),
3664 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 2),
3665 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3666 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3669 .fixup_map1
= { 4 },
3670 .errstr
= "R4 invalid mem access",
3672 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3675 "alu ops on ptr_to_map_value_or_null, 2",
3677 BPF_MOV64_IMM(BPF_REG_1
, 10),
3678 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3679 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3680 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3681 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3682 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3683 BPF_FUNC_map_lookup_elem
),
3684 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3685 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, -1),
3686 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3687 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3690 .fixup_map1
= { 4 },
3691 .errstr
= "R4 invalid mem access",
3693 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3696 "alu ops on ptr_to_map_value_or_null, 3",
3698 BPF_MOV64_IMM(BPF_REG_1
, 10),
3699 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3700 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3701 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3702 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3703 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3704 BPF_FUNC_map_lookup_elem
),
3705 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3706 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_4
, 1),
3707 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3708 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3711 .fixup_map1
= { 4 },
3712 .errstr
= "R4 invalid mem access",
3714 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3717 "invalid memory access with multiple map_lookup_elem calls",
3719 BPF_MOV64_IMM(BPF_REG_1
, 10),
3720 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3721 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3722 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3723 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3724 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
3725 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
3726 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3727 BPF_FUNC_map_lookup_elem
),
3728 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3729 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
3730 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
3731 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3732 BPF_FUNC_map_lookup_elem
),
3733 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3734 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3737 .fixup_map1
= { 4 },
3739 .errstr
= "R4 !read_ok",
3740 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3743 "valid indirect map_lookup_elem access with 2nd lookup in branch",
3745 BPF_MOV64_IMM(BPF_REG_1
, 10),
3746 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3747 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3748 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3749 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3750 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
3751 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
3752 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3753 BPF_FUNC_map_lookup_elem
),
3754 BPF_MOV64_IMM(BPF_REG_2
, 10),
3755 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 0, 3),
3756 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
3757 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
3758 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3759 BPF_FUNC_map_lookup_elem
),
3760 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3761 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3762 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3765 .fixup_map1
= { 4 },
3767 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3770 "invalid map access from else condition",
3772 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3773 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3774 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3775 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3776 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
3777 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
3778 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3779 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
-1, 1),
3780 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 1),
3781 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3782 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3783 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, offsetof(struct test_val
, foo
)),
3786 .fixup_map2
= { 3 },
3787 .errstr
= "R0 unbounded memory access",
3789 .errstr_unpriv
= "R0 leaks addr",
3790 .result_unpriv
= REJECT
,
3791 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3794 "constant register |= constant should keep constant type",
3796 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
3797 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
3798 BPF_MOV64_IMM(BPF_REG_2
, 34),
3799 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 13),
3800 BPF_MOV64_IMM(BPF_REG_3
, 0),
3801 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
3805 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
3808 "constant register |= constant should not bypass stack boundary checks",
3810 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
3811 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
3812 BPF_MOV64_IMM(BPF_REG_2
, 34),
3813 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 24),
3814 BPF_MOV64_IMM(BPF_REG_3
, 0),
3815 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
3818 .errstr
= "invalid stack type R1 off=-48 access_size=58",
3820 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
3823 "constant register |= constant register should keep constant type",
3825 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
3826 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
3827 BPF_MOV64_IMM(BPF_REG_2
, 34),
3828 BPF_MOV64_IMM(BPF_REG_4
, 13),
3829 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
3830 BPF_MOV64_IMM(BPF_REG_3
, 0),
3831 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
3835 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
3838 "constant register |= constant register should not bypass stack boundary checks",
3840 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
3841 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
3842 BPF_MOV64_IMM(BPF_REG_2
, 34),
3843 BPF_MOV64_IMM(BPF_REG_4
, 24),
3844 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
3845 BPF_MOV64_IMM(BPF_REG_3
, 0),
3846 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
3849 .errstr
= "invalid stack type R1 off=-48 access_size=58",
3851 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
3854 "invalid direct packet write for LWT_IN",
3856 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3857 offsetof(struct __sk_buff
, data
)),
3858 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3859 offsetof(struct __sk_buff
, data_end
)),
3860 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3861 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3862 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3863 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3864 BPF_MOV64_IMM(BPF_REG_0
, 0),
3867 .errstr
= "cannot write into packet",
3869 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
3872 "invalid direct packet write for LWT_OUT",
3874 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3875 offsetof(struct __sk_buff
, data
)),
3876 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3877 offsetof(struct __sk_buff
, data_end
)),
3878 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3879 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3880 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3881 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3882 BPF_MOV64_IMM(BPF_REG_0
, 0),
3885 .errstr
= "cannot write into packet",
3887 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
3890 "direct packet write for LWT_XMIT",
3892 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3893 offsetof(struct __sk_buff
, data
)),
3894 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3895 offsetof(struct __sk_buff
, data_end
)),
3896 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3897 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3898 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3899 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
3900 BPF_MOV64_IMM(BPF_REG_0
, 0),
3904 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
3907 "direct packet read for LWT_IN",
3909 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3910 offsetof(struct __sk_buff
, data
)),
3911 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3912 offsetof(struct __sk_buff
, data_end
)),
3913 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3914 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3915 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3916 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3917 BPF_MOV64_IMM(BPF_REG_0
, 0),
3921 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
3924 "direct packet read for LWT_OUT",
3926 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3927 offsetof(struct __sk_buff
, data
)),
3928 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3929 offsetof(struct __sk_buff
, data_end
)),
3930 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3931 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3932 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3933 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3934 BPF_MOV64_IMM(BPF_REG_0
, 0),
3938 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
3941 "direct packet read for LWT_XMIT",
3943 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3944 offsetof(struct __sk_buff
, data
)),
3945 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3946 offsetof(struct __sk_buff
, data_end
)),
3947 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3948 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3949 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3950 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3951 BPF_MOV64_IMM(BPF_REG_0
, 0),
3955 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
3958 "overlapping checks for direct packet access",
3960 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3961 offsetof(struct __sk_buff
, data
)),
3962 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3963 offsetof(struct __sk_buff
, data_end
)),
3964 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3965 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3966 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
3967 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3968 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
3969 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
3970 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
3971 BPF_MOV64_IMM(BPF_REG_0
, 0),
3975 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
3978 "invalid access of tc_classid for LWT_IN",
3980 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
3981 offsetof(struct __sk_buff
, tc_classid
)),
3985 .errstr
= "invalid bpf_context access",
3988 "invalid access of tc_classid for LWT_OUT",
3990 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
3991 offsetof(struct __sk_buff
, tc_classid
)),
3995 .errstr
= "invalid bpf_context access",
3998 "invalid access of tc_classid for LWT_XMIT",
4000 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4001 offsetof(struct __sk_buff
, tc_classid
)),
4005 .errstr
= "invalid bpf_context access",
4008 "leak pointer into ctx 1",
4010 BPF_MOV64_IMM(BPF_REG_0
, 0),
4011 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4012 offsetof(struct __sk_buff
, cb
[0])),
4013 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4014 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4015 offsetof(struct __sk_buff
, cb
[0])),
4018 .fixup_map1
= { 2 },
4019 .errstr_unpriv
= "R2 leaks addr into mem",
4020 .result_unpriv
= REJECT
,
4024 "leak pointer into ctx 2",
4026 BPF_MOV64_IMM(BPF_REG_0
, 0),
4027 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4028 offsetof(struct __sk_buff
, cb
[0])),
4029 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_10
,
4030 offsetof(struct __sk_buff
, cb
[0])),
4033 .errstr_unpriv
= "R10 leaks addr into mem",
4034 .result_unpriv
= REJECT
,
4038 "leak pointer into ctx 3",
4040 BPF_MOV64_IMM(BPF_REG_0
, 0),
4041 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4042 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4043 offsetof(struct __sk_buff
, cb
[0])),
4046 .fixup_map1
= { 1 },
4047 .errstr_unpriv
= "R2 leaks addr into ctx",
4048 .result_unpriv
= REJECT
,
4052 "leak pointer into map val",
4054 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
4055 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4056 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4057 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4058 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4059 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4060 BPF_FUNC_map_lookup_elem
),
4061 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4062 BPF_MOV64_IMM(BPF_REG_3
, 0),
4063 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
4064 BPF_STX_XADD(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
4065 BPF_MOV64_IMM(BPF_REG_0
, 0),
4068 .fixup_map1
= { 4 },
4069 .errstr_unpriv
= "R6 leaks addr into mem",
4070 .result_unpriv
= REJECT
,
4074 "helper access to map: full range",
4076 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4077 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4078 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4079 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4080 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4081 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4082 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4083 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
4084 BPF_MOV64_IMM(BPF_REG_3
, 0),
4085 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4088 .fixup_map2
= { 3 },
4090 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4093 "helper access to map: partial range",
4095 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4096 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4097 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4098 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4099 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4100 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4101 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4102 BPF_MOV64_IMM(BPF_REG_2
, 8),
4103 BPF_MOV64_IMM(BPF_REG_3
, 0),
4104 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4107 .fixup_map2
= { 3 },
4109 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4112 "helper access to map: empty range",
4114 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4115 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4116 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4117 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4118 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4119 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4120 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4121 BPF_MOV64_IMM(BPF_REG_2
, 0),
4122 BPF_MOV64_IMM(BPF_REG_3
, 0),
4123 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4126 .fixup_map2
= { 3 },
4127 .errstr
= "invalid access to map value, value_size=48 off=0 size=0",
4129 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4132 "helper access to map: out-of-bound range",
4134 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4135 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4136 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4137 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4138 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4139 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4140 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4141 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
) + 8),
4142 BPF_MOV64_IMM(BPF_REG_3
, 0),
4143 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4146 .fixup_map2
= { 3 },
4147 .errstr
= "invalid access to map value, value_size=48 off=0 size=56",
4149 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4152 "helper access to map: negative range",
4154 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4155 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4156 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4157 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4158 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4159 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4160 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4161 BPF_MOV64_IMM(BPF_REG_2
, -8),
4162 BPF_MOV64_IMM(BPF_REG_3
, 0),
4163 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4166 .fixup_map2
= { 3 },
4167 .errstr
= "R2 min value is negative",
4169 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4172 "helper access to adjusted map (via const imm): full range",
4174 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4175 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4176 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4177 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4178 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4179 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4180 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4181 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4182 offsetof(struct test_val
, foo
)),
4183 BPF_MOV64_IMM(BPF_REG_2
,
4184 sizeof(struct test_val
) -
4185 offsetof(struct test_val
, foo
)),
4186 BPF_MOV64_IMM(BPF_REG_3
, 0),
4187 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4190 .fixup_map2
= { 3 },
4192 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4195 "helper access to adjusted map (via const imm): partial range",
4197 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4198 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4199 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4200 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4201 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4202 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4203 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4204 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4205 offsetof(struct test_val
, foo
)),
4206 BPF_MOV64_IMM(BPF_REG_2
, 8),
4207 BPF_MOV64_IMM(BPF_REG_3
, 0),
4208 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4211 .fixup_map2
= { 3 },
4213 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4216 "helper access to adjusted map (via const imm): empty range",
4218 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4219 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4220 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4221 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4222 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4223 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4224 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4225 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4226 offsetof(struct test_val
, foo
)),
4227 BPF_MOV64_IMM(BPF_REG_2
, 0),
4228 BPF_MOV64_IMM(BPF_REG_3
, 0),
4229 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4232 .fixup_map2
= { 3 },
4233 .errstr
= "invalid access to map value, value_size=48 off=4 size=0",
4235 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4238 "helper access to adjusted map (via const imm): out-of-bound range",
4240 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4241 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4242 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4243 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4244 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4245 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4246 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4247 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4248 offsetof(struct test_val
, foo
)),
4249 BPF_MOV64_IMM(BPF_REG_2
,
4250 sizeof(struct test_val
) -
4251 offsetof(struct test_val
, foo
) + 8),
4252 BPF_MOV64_IMM(BPF_REG_3
, 0),
4253 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4256 .fixup_map2
= { 3 },
4257 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4259 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4262 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4264 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4265 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4266 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4267 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4268 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4269 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4270 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4271 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4272 offsetof(struct test_val
, foo
)),
4273 BPF_MOV64_IMM(BPF_REG_2
, -8),
4274 BPF_MOV64_IMM(BPF_REG_3
, 0),
4275 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4278 .fixup_map2
= { 3 },
4279 .errstr
= "R2 min value is negative",
4281 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4284 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4286 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4287 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4288 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4289 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4290 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4291 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4292 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4293 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4294 offsetof(struct test_val
, foo
)),
4295 BPF_MOV64_IMM(BPF_REG_2
, -1),
4296 BPF_MOV64_IMM(BPF_REG_3
, 0),
4297 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4300 .fixup_map2
= { 3 },
4301 .errstr
= "R2 min value is negative",
4303 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4306 "helper access to adjusted map (via const reg): full range",
4308 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4309 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4310 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4311 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4312 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4313 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4314 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4315 BPF_MOV64_IMM(BPF_REG_3
,
4316 offsetof(struct test_val
, foo
)),
4317 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4318 BPF_MOV64_IMM(BPF_REG_2
,
4319 sizeof(struct test_val
) -
4320 offsetof(struct test_val
, foo
)),
4321 BPF_MOV64_IMM(BPF_REG_3
, 0),
4322 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4325 .fixup_map2
= { 3 },
4327 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4330 "helper access to adjusted map (via const reg): partial range",
4332 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4333 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4334 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4335 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4336 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4337 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4338 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4339 BPF_MOV64_IMM(BPF_REG_3
,
4340 offsetof(struct test_val
, foo
)),
4341 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4342 BPF_MOV64_IMM(BPF_REG_2
, 8),
4343 BPF_MOV64_IMM(BPF_REG_3
, 0),
4344 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4347 .fixup_map2
= { 3 },
4349 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4352 "helper access to adjusted map (via const reg): empty range",
4354 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4355 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4356 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4357 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4358 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4359 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4360 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4361 BPF_MOV64_IMM(BPF_REG_3
, 0),
4362 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4363 BPF_MOV64_IMM(BPF_REG_2
, 0),
4364 BPF_MOV64_IMM(BPF_REG_3
, 0),
4365 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4368 .fixup_map2
= { 3 },
4369 .errstr
= "R1 min value is outside of the array range",
4371 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4374 "helper access to adjusted map (via const reg): out-of-bound range",
4376 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4377 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4378 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4379 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4380 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4381 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4382 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4383 BPF_MOV64_IMM(BPF_REG_3
,
4384 offsetof(struct test_val
, foo
)),
4385 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4386 BPF_MOV64_IMM(BPF_REG_2
,
4387 sizeof(struct test_val
) -
4388 offsetof(struct test_val
, foo
) + 8),
4389 BPF_MOV64_IMM(BPF_REG_3
, 0),
4390 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4393 .fixup_map2
= { 3 },
4394 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4396 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4399 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4401 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4402 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4403 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4404 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4405 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4406 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4407 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4408 BPF_MOV64_IMM(BPF_REG_3
,
4409 offsetof(struct test_val
, foo
)),
4410 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4411 BPF_MOV64_IMM(BPF_REG_2
, -8),
4412 BPF_MOV64_IMM(BPF_REG_3
, 0),
4413 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4416 .fixup_map2
= { 3 },
4417 .errstr
= "R2 min value is negative",
4419 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4422 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4424 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4425 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4426 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4427 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4428 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4429 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4430 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4431 BPF_MOV64_IMM(BPF_REG_3
,
4432 offsetof(struct test_val
, foo
)),
4433 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4434 BPF_MOV64_IMM(BPF_REG_2
, -1),
4435 BPF_MOV64_IMM(BPF_REG_3
, 0),
4436 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4439 .fixup_map2
= { 3 },
4440 .errstr
= "R2 min value is negative",
4442 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4445 "helper access to adjusted map (via variable): full range",
4447 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4448 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4449 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4450 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4451 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4452 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4453 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4454 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4455 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4456 offsetof(struct test_val
, foo
), 4),
4457 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4458 BPF_MOV64_IMM(BPF_REG_2
,
4459 sizeof(struct test_val
) -
4460 offsetof(struct test_val
, foo
)),
4461 BPF_MOV64_IMM(BPF_REG_3
, 0),
4462 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4465 .fixup_map2
= { 3 },
4467 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4470 "helper access to adjusted map (via variable): partial range",
4472 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4473 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4474 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4475 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4476 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4477 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4478 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4479 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4480 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4481 offsetof(struct test_val
, foo
), 4),
4482 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4483 BPF_MOV64_IMM(BPF_REG_2
, 8),
4484 BPF_MOV64_IMM(BPF_REG_3
, 0),
4485 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4488 .fixup_map2
= { 3 },
4490 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4493 "helper access to adjusted map (via variable): empty range",
4495 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4496 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4497 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4498 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4499 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4500 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4501 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4502 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4503 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4504 offsetof(struct test_val
, foo
), 4),
4505 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4506 BPF_MOV64_IMM(BPF_REG_2
, 0),
4507 BPF_MOV64_IMM(BPF_REG_3
, 0),
4508 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4511 .fixup_map2
= { 3 },
4512 .errstr
= "R1 min value is outside of the array range",
4514 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4517 "helper access to adjusted map (via variable): no max check",
4519 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4520 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4521 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4522 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4523 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4524 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4525 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4526 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4527 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4528 BPF_MOV64_IMM(BPF_REG_2
, 1),
4529 BPF_MOV64_IMM(BPF_REG_3
, 0),
4530 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4533 .fixup_map2
= { 3 },
4534 .errstr
= "R1 unbounded memory access",
4536 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4539 "helper access to adjusted map (via variable): wrong max check",
4541 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4542 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4543 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4544 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4545 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4546 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4547 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4548 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4549 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4550 offsetof(struct test_val
, foo
), 4),
4551 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4552 BPF_MOV64_IMM(BPF_REG_2
,
4553 sizeof(struct test_val
) -
4554 offsetof(struct test_val
, foo
) + 1),
4555 BPF_MOV64_IMM(BPF_REG_3
, 0),
4556 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4559 .fixup_map2
= { 3 },
4560 .errstr
= "invalid access to map value, value_size=48 off=4 size=45",
4562 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4565 "helper access to map: bounds check using <, good access",
4567 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4569 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4570 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4571 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4572 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4573 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4574 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4575 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 2),
4576 BPF_MOV64_IMM(BPF_REG_0
, 0),
4578 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4579 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4580 BPF_MOV64_IMM(BPF_REG_0
, 0),
4583 .fixup_map2
= { 3 },
4585 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4588 "helper access to map: bounds check using <, bad access",
4590 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4591 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4592 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4593 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4594 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4595 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4596 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4597 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4598 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 4),
4599 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4600 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4601 BPF_MOV64_IMM(BPF_REG_0
, 0),
4603 BPF_MOV64_IMM(BPF_REG_0
, 0),
4606 .fixup_map2
= { 3 },
4608 .errstr
= "R1 unbounded memory access",
4609 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4612 "helper access to map: bounds check using <=, good access",
4614 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4615 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4616 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4617 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4618 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4619 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4620 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4621 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4622 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 2),
4623 BPF_MOV64_IMM(BPF_REG_0
, 0),
4625 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4626 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4627 BPF_MOV64_IMM(BPF_REG_0
, 0),
4630 .fixup_map2
= { 3 },
4632 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4635 "helper access to map: bounds check using <=, bad access",
4637 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4638 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4639 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4640 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4641 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4642 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4643 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4644 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4645 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 4),
4646 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4647 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4648 BPF_MOV64_IMM(BPF_REG_0
, 0),
4650 BPF_MOV64_IMM(BPF_REG_0
, 0),
4653 .fixup_map2
= { 3 },
4655 .errstr
= "R1 unbounded memory access",
4656 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4659 "helper access to map: bounds check using s<, good access",
4661 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4662 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4663 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4664 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4665 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4666 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4667 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4668 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4669 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4670 BPF_MOV64_IMM(BPF_REG_0
, 0),
4672 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 0, -3),
4673 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4674 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4675 BPF_MOV64_IMM(BPF_REG_0
, 0),
4678 .fixup_map2
= { 3 },
4680 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4683 "helper access to map: bounds check using s<, good access 2",
4685 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4686 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4687 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4688 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4689 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4690 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4691 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4692 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4693 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4694 BPF_MOV64_IMM(BPF_REG_0
, 0),
4696 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
4697 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4698 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4699 BPF_MOV64_IMM(BPF_REG_0
, 0),
4702 .fixup_map2
= { 3 },
4704 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4707 "helper access to map: bounds check using s<, bad access",
4709 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4710 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4711 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4712 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4713 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4714 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4715 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4716 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
4717 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4718 BPF_MOV64_IMM(BPF_REG_0
, 0),
4720 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
4721 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4722 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4723 BPF_MOV64_IMM(BPF_REG_0
, 0),
4726 .fixup_map2
= { 3 },
4728 .errstr
= "R1 min value is negative",
4729 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4732 "helper access to map: bounds check using s<=, good access",
4734 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4735 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4736 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4737 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4738 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4739 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4740 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4741 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4742 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
4743 BPF_MOV64_IMM(BPF_REG_0
, 0),
4745 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 0, -3),
4746 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4747 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4748 BPF_MOV64_IMM(BPF_REG_0
, 0),
4751 .fixup_map2
= { 3 },
4753 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4756 "helper access to map: bounds check using s<=, good access 2",
4758 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4759 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4760 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4761 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4763 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4764 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4765 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4766 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
4767 BPF_MOV64_IMM(BPF_REG_0
, 0),
4769 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
4770 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4771 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4772 BPF_MOV64_IMM(BPF_REG_0
, 0),
4775 .fixup_map2
= { 3 },
4777 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4780 "helper access to map: bounds check using s<=, bad access",
4782 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4783 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4784 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4785 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4786 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4787 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4788 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4789 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
4790 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
4791 BPF_MOV64_IMM(BPF_REG_0
, 0),
4793 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
4794 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4795 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4796 BPF_MOV64_IMM(BPF_REG_0
, 0),
4799 .fixup_map2
= { 3 },
4801 .errstr
= "R1 min value is negative",
4802 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4805 "map element value is preserved across register spilling",
4807 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4808 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4809 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4810 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4811 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4812 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4813 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
4814 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4815 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
4816 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
4817 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
4818 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
4821 .fixup_map2
= { 3 },
4822 .errstr_unpriv
= "R0 leaks addr",
4824 .result_unpriv
= REJECT
,
4827 "map element value or null is marked on register spilling",
4829 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4830 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4831 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4832 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4833 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4834 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4835 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -152),
4836 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
4837 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4838 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
4839 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
4842 .fixup_map2
= { 3 },
4843 .errstr_unpriv
= "R0 leaks addr",
4845 .result_unpriv
= REJECT
,
4848 "map element value store of cleared call register",
4850 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4851 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4852 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4853 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4854 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4855 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4856 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
4859 .fixup_map2
= { 3 },
4860 .errstr_unpriv
= "R1 !read_ok",
4861 .errstr
= "R1 !read_ok",
4863 .result_unpriv
= REJECT
,
4866 "map element value with unaligned store",
4868 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4869 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4870 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4871 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4872 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4873 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 17),
4874 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
4875 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
4876 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 43),
4877 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, -2, 44),
4878 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
4879 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 32),
4880 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 2, 33),
4881 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -2, 34),
4882 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_8
, 5),
4883 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 22),
4884 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 4, 23),
4885 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -7, 24),
4886 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_8
),
4887 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, 3),
4888 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 0, 22),
4889 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 4, 23),
4890 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, -4, 24),
4893 .fixup_map2
= { 3 },
4894 .errstr_unpriv
= "R0 leaks addr",
4896 .result_unpriv
= REJECT
,
4897 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4900 "map element value with unaligned load",
4902 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4903 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4904 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4905 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4906 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4907 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
4908 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4909 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 9),
4910 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
4911 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
4912 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 2),
4913 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
4914 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 0),
4915 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 2),
4916 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 5),
4917 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
4918 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 4),
4921 .fixup_map2
= { 3 },
4922 .errstr_unpriv
= "R0 leaks addr",
4924 .result_unpriv
= REJECT
,
4925 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4928 "map element value illegal alu op, 1",
4930 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4931 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4932 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4933 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4934 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4935 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4936 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 8),
4937 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
4940 .fixup_map2
= { 3 },
4941 .errstr_unpriv
= "R0 bitwise operator &= on pointer",
4942 .errstr
= "invalid mem access 'inv'",
4944 .result_unpriv
= REJECT
,
4947 "map element value illegal alu op, 2",
4949 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4950 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4951 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4952 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4953 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4954 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4955 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_0
, 0),
4956 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
4959 .fixup_map2
= { 3 },
4960 .errstr_unpriv
= "R0 32-bit pointer arithmetic prohibited",
4961 .errstr
= "invalid mem access 'inv'",
4963 .result_unpriv
= REJECT
,
4966 "map element value illegal alu op, 3",
4968 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4969 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4970 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4971 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4972 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4973 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4974 BPF_ALU64_IMM(BPF_DIV
, BPF_REG_0
, 42),
4975 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
4978 .fixup_map2
= { 3 },
4979 .errstr_unpriv
= "R0 pointer arithmetic with /= operator",
4980 .errstr
= "invalid mem access 'inv'",
4982 .result_unpriv
= REJECT
,
4985 "map element value illegal alu op, 4",
4987 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4988 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4989 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4990 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4991 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4992 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
4993 BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_0
, 64),
4994 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
4997 .fixup_map2
= { 3 },
4998 .errstr_unpriv
= "R0 pointer arithmetic prohibited",
4999 .errstr
= "invalid mem access 'inv'",
5001 .result_unpriv
= REJECT
,
5004 "map element value illegal alu op, 5",
5006 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5007 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5008 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5009 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5010 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5011 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5012 BPF_MOV64_IMM(BPF_REG_3
, 4096),
5013 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5014 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5015 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
5016 BPF_STX_XADD(BPF_DW
, BPF_REG_2
, BPF_REG_3
, 0),
5017 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, 0),
5018 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5021 .fixup_map2
= { 3 },
5022 .errstr
= "R0 invalid mem access 'inv'",
5026 "map element value is preserved across register spilling",
5028 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5029 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5030 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5031 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5032 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5033 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5034 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
,
5035 offsetof(struct test_val
, foo
)),
5036 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5037 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5038 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5039 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5040 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5041 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5044 .fixup_map2
= { 3 },
5045 .errstr_unpriv
= "R0 leaks addr",
5047 .result_unpriv
= REJECT
,
5048 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5051 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5053 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5054 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5055 BPF_MOV64_IMM(BPF_REG_0
, 0),
5056 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5057 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5058 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5059 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5060 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5061 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5062 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5063 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5064 BPF_MOV64_IMM(BPF_REG_2
, 16),
5065 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5066 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5067 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5068 BPF_MOV64_IMM(BPF_REG_4
, 0),
5069 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5070 BPF_MOV64_IMM(BPF_REG_3
, 0),
5071 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5072 BPF_MOV64_IMM(BPF_REG_0
, 0),
5076 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5079 "helper access to variable memory: stack, bitwise AND, zero included",
5081 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5082 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5083 BPF_MOV64_IMM(BPF_REG_2
, 16),
5084 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5085 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5086 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5087 BPF_MOV64_IMM(BPF_REG_3
, 0),
5088 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5091 .errstr
= "invalid stack type R1 off=-64 access_size=0",
5093 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5096 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5098 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5099 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5100 BPF_MOV64_IMM(BPF_REG_2
, 16),
5101 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5102 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5103 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 65),
5104 BPF_MOV64_IMM(BPF_REG_4
, 0),
5105 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5106 BPF_MOV64_IMM(BPF_REG_3
, 0),
5107 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5108 BPF_MOV64_IMM(BPF_REG_0
, 0),
5111 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5113 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5116 "helper access to variable memory: stack, JMP, correct bounds",
5118 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5119 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5120 BPF_MOV64_IMM(BPF_REG_0
, 0),
5121 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5122 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5123 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5124 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5125 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5126 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5127 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5128 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5129 BPF_MOV64_IMM(BPF_REG_2
, 16),
5130 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5131 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5132 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 4),
5133 BPF_MOV64_IMM(BPF_REG_4
, 0),
5134 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5135 BPF_MOV64_IMM(BPF_REG_3
, 0),
5136 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5137 BPF_MOV64_IMM(BPF_REG_0
, 0),
5141 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5144 "helper access to variable memory: stack, JMP (signed), correct bounds",
5146 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5147 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5148 BPF_MOV64_IMM(BPF_REG_0
, 0),
5149 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5150 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5151 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5152 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5153 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5154 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5155 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5156 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5157 BPF_MOV64_IMM(BPF_REG_2
, 16),
5158 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5159 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5160 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 4),
5161 BPF_MOV64_IMM(BPF_REG_4
, 0),
5162 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5163 BPF_MOV64_IMM(BPF_REG_3
, 0),
5164 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5165 BPF_MOV64_IMM(BPF_REG_0
, 0),
5169 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5172 "helper access to variable memory: stack, JMP, bounds + offset",
5174 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5175 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5176 BPF_MOV64_IMM(BPF_REG_2
, 16),
5177 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5178 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5179 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 5),
5180 BPF_MOV64_IMM(BPF_REG_4
, 0),
5181 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 3),
5182 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5183 BPF_MOV64_IMM(BPF_REG_3
, 0),
5184 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5185 BPF_MOV64_IMM(BPF_REG_0
, 0),
5188 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5190 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5193 "helper access to variable memory: stack, JMP, wrong max",
5195 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5196 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5197 BPF_MOV64_IMM(BPF_REG_2
, 16),
5198 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5199 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5200 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 65, 4),
5201 BPF_MOV64_IMM(BPF_REG_4
, 0),
5202 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5203 BPF_MOV64_IMM(BPF_REG_3
, 0),
5204 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5205 BPF_MOV64_IMM(BPF_REG_0
, 0),
5208 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5210 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5213 "helper access to variable memory: stack, JMP, no max check",
5215 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5216 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5217 BPF_MOV64_IMM(BPF_REG_2
, 16),
5218 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5219 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5220 BPF_MOV64_IMM(BPF_REG_4
, 0),
5221 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5222 BPF_MOV64_IMM(BPF_REG_3
, 0),
5223 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5224 BPF_MOV64_IMM(BPF_REG_0
, 0),
5227 /* because max wasn't checked, signed min is negative */
5228 .errstr
= "R2 min value is negative, either use unsigned or 'var &= const'",
5230 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5233 "helper access to variable memory: stack, JMP, no min check",
5235 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5236 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5237 BPF_MOV64_IMM(BPF_REG_2
, 16),
5238 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5239 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5240 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 3),
5241 BPF_MOV64_IMM(BPF_REG_3
, 0),
5242 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5243 BPF_MOV64_IMM(BPF_REG_0
, 0),
5246 .errstr
= "invalid stack type R1 off=-64 access_size=0",
5248 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5251 "helper access to variable memory: stack, JMP (signed), no min check",
5253 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5254 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5255 BPF_MOV64_IMM(BPF_REG_2
, 16),
5256 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5257 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5258 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 3),
5259 BPF_MOV64_IMM(BPF_REG_3
, 0),
5260 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5261 BPF_MOV64_IMM(BPF_REG_0
, 0),
5264 .errstr
= "R2 min value is negative",
5266 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5269 "helper access to variable memory: map, JMP, correct bounds",
5271 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5272 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5273 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5274 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5275 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5276 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5277 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5278 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5279 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5280 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5281 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5282 sizeof(struct test_val
), 4),
5283 BPF_MOV64_IMM(BPF_REG_4
, 0),
5284 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5285 BPF_MOV64_IMM(BPF_REG_3
, 0),
5286 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5287 BPF_MOV64_IMM(BPF_REG_0
, 0),
5290 .fixup_map2
= { 3 },
5292 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5295 "helper access to variable memory: map, JMP, wrong max",
5297 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5298 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5299 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5300 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5301 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5302 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5303 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5304 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5305 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5306 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5307 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5308 sizeof(struct test_val
) + 1, 4),
5309 BPF_MOV64_IMM(BPF_REG_4
, 0),
5310 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5311 BPF_MOV64_IMM(BPF_REG_3
, 0),
5312 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5313 BPF_MOV64_IMM(BPF_REG_0
, 0),
5316 .fixup_map2
= { 3 },
5317 .errstr
= "invalid access to map value, value_size=48 off=0 size=49",
5319 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5322 "helper access to variable memory: map adjusted, JMP, correct bounds",
5324 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5325 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5326 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5327 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5328 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5329 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5330 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5331 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5332 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5333 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5334 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5335 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5336 sizeof(struct test_val
) - 20, 4),
5337 BPF_MOV64_IMM(BPF_REG_4
, 0),
5338 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5339 BPF_MOV64_IMM(BPF_REG_3
, 0),
5340 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5341 BPF_MOV64_IMM(BPF_REG_0
, 0),
5344 .fixup_map2
= { 3 },
5346 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5349 "helper access to variable memory: map adjusted, JMP, wrong max",
5351 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5352 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5353 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5354 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5355 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5356 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5357 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5358 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5359 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5360 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5361 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5362 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5363 sizeof(struct test_val
) - 19, 4),
5364 BPF_MOV64_IMM(BPF_REG_4
, 0),
5365 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5366 BPF_MOV64_IMM(BPF_REG_3
, 0),
5367 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5368 BPF_MOV64_IMM(BPF_REG_0
, 0),
5371 .fixup_map2
= { 3 },
5372 .errstr
= "R1 min value is outside of the array range",
5374 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5377 "helper access to variable memory: size = 0 allowed on NULL",
5379 BPF_MOV64_IMM(BPF_REG_1
, 0),
5380 BPF_MOV64_IMM(BPF_REG_2
, 0),
5381 BPF_MOV64_IMM(BPF_REG_3
, 0),
5382 BPF_MOV64_IMM(BPF_REG_4
, 0),
5383 BPF_MOV64_IMM(BPF_REG_5
, 0),
5384 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5388 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5391 "helper access to variable memory: size > 0 not allowed on NULL",
5393 BPF_MOV64_IMM(BPF_REG_1
, 0),
5394 BPF_MOV64_IMM(BPF_REG_2
, 0),
5395 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5396 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5397 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5398 BPF_MOV64_IMM(BPF_REG_3
, 0),
5399 BPF_MOV64_IMM(BPF_REG_4
, 0),
5400 BPF_MOV64_IMM(BPF_REG_5
, 0),
5401 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5404 .errstr
= "R1 type=inv expected=fp",
5406 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5409 "helper access to variable memory: size = 0 not allowed on != NULL",
5411 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5412 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
5413 BPF_MOV64_IMM(BPF_REG_2
, 0),
5414 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
5415 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 8),
5416 BPF_MOV64_IMM(BPF_REG_3
, 0),
5417 BPF_MOV64_IMM(BPF_REG_4
, 0),
5418 BPF_MOV64_IMM(BPF_REG_5
, 0),
5419 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5422 .errstr
= "invalid stack type R1 off=-8 access_size=0",
5424 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5427 "helper access to variable memory: 8 bytes leak",
5429 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5430 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5431 BPF_MOV64_IMM(BPF_REG_0
, 0),
5432 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5433 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5434 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5435 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5436 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5437 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5438 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5439 BPF_MOV64_IMM(BPF_REG_2
, 0),
5440 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5441 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5442 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 63),
5443 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5444 BPF_MOV64_IMM(BPF_REG_3
, 0),
5445 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5446 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5449 .errstr
= "invalid indirect read from stack off -64+32 size 64",
5451 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5454 "helper access to variable memory: 8 bytes no leak (init memory)",
5456 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5457 BPF_MOV64_IMM(BPF_REG_0
, 0),
5458 BPF_MOV64_IMM(BPF_REG_0
, 0),
5459 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5460 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5461 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5462 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5463 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5464 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5465 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5466 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5467 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5468 BPF_MOV64_IMM(BPF_REG_2
, 0),
5469 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 32),
5470 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 32),
5471 BPF_MOV64_IMM(BPF_REG_3
, 0),
5472 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5473 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5477 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5480 "invalid and of negative number",
5482 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5483 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5484 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5485 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5486 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5487 BPF_FUNC_map_lookup_elem
),
5488 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5489 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
5490 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, -4),
5491 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
5492 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
5493 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
5494 offsetof(struct test_val
, foo
)),
5497 .fixup_map2
= { 3 },
5498 .errstr
= "R0 max value is outside of the array range",
5500 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5503 "invalid range check",
5505 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5506 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5507 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5508 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5509 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5510 BPF_FUNC_map_lookup_elem
),
5511 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 12),
5512 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5513 BPF_MOV64_IMM(BPF_REG_9
, 1),
5514 BPF_ALU32_IMM(BPF_MOD
, BPF_REG_1
, 2),
5515 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 1),
5516 BPF_ALU32_REG(BPF_AND
, BPF_REG_9
, BPF_REG_1
),
5517 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_9
, 1),
5518 BPF_ALU32_IMM(BPF_RSH
, BPF_REG_9
, 1),
5519 BPF_MOV32_IMM(BPF_REG_3
, 1),
5520 BPF_ALU32_REG(BPF_SUB
, BPF_REG_3
, BPF_REG_9
),
5521 BPF_ALU32_IMM(BPF_MUL
, BPF_REG_3
, 0x10000000),
5522 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
5523 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_3
, 0),
5524 BPF_MOV64_REG(BPF_REG_0
, 0),
5527 .fixup_map2
= { 3 },
5528 .errstr
= "R0 max value is outside of the array range",
5530 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5533 "map in map access",
5535 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5536 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5537 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5538 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5539 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5540 BPF_FUNC_map_lookup_elem
),
5541 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
5542 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5543 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5544 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5545 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5546 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5547 BPF_FUNC_map_lookup_elem
),
5548 BPF_MOV64_REG(BPF_REG_0
, 0),
5551 .fixup_map_in_map
= { 3 },
5555 "invalid inner map pointer",
5557 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5558 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5559 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5560 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5561 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5562 BPF_FUNC_map_lookup_elem
),
5563 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5564 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5565 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5566 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5567 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
5569 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5570 BPF_FUNC_map_lookup_elem
),
5571 BPF_MOV64_REG(BPF_REG_0
, 0),
5574 .fixup_map_in_map
= { 3 },
5575 .errstr
= "R1 type=inv expected=map_ptr",
5576 .errstr_unpriv
= "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5580 "forgot null checking on the inner map pointer",
5582 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5583 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5584 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5585 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5586 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5587 BPF_FUNC_map_lookup_elem
),
5588 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5589 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5590 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5591 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5592 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5593 BPF_FUNC_map_lookup_elem
),
5594 BPF_MOV64_REG(BPF_REG_0
, 0),
5597 .fixup_map_in_map
= { 3 },
5598 .errstr
= "R1 type=map_value_or_null expected=map_ptr",
5602 "ld_abs: check calling conv, r1",
5604 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5605 BPF_MOV64_IMM(BPF_REG_1
, 0),
5606 BPF_LD_ABS(BPF_W
, -0x200000),
5607 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
5610 .errstr
= "R1 !read_ok",
5614 "ld_abs: check calling conv, r2",
5616 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5617 BPF_MOV64_IMM(BPF_REG_2
, 0),
5618 BPF_LD_ABS(BPF_W
, -0x200000),
5619 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
5622 .errstr
= "R2 !read_ok",
5626 "ld_abs: check calling conv, r3",
5628 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5629 BPF_MOV64_IMM(BPF_REG_3
, 0),
5630 BPF_LD_ABS(BPF_W
, -0x200000),
5631 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
5634 .errstr
= "R3 !read_ok",
5638 "ld_abs: check calling conv, r4",
5640 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5641 BPF_MOV64_IMM(BPF_REG_4
, 0),
5642 BPF_LD_ABS(BPF_W
, -0x200000),
5643 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
5646 .errstr
= "R4 !read_ok",
5650 "ld_abs: check calling conv, r5",
5652 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5653 BPF_MOV64_IMM(BPF_REG_5
, 0),
5654 BPF_LD_ABS(BPF_W
, -0x200000),
5655 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
5658 .errstr
= "R5 !read_ok",
5662 "ld_abs: check calling conv, r7",
5664 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5665 BPF_MOV64_IMM(BPF_REG_7
, 0),
5666 BPF_LD_ABS(BPF_W
, -0x200000),
5667 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
5673 "ld_ind: check calling conv, r1",
5675 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5676 BPF_MOV64_IMM(BPF_REG_1
, 1),
5677 BPF_LD_IND(BPF_W
, BPF_REG_1
, -0x200000),
5678 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
5681 .errstr
= "R1 !read_ok",
5685 "ld_ind: check calling conv, r2",
5687 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5688 BPF_MOV64_IMM(BPF_REG_2
, 1),
5689 BPF_LD_IND(BPF_W
, BPF_REG_2
, -0x200000),
5690 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
5693 .errstr
= "R2 !read_ok",
5697 "ld_ind: check calling conv, r3",
5699 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5700 BPF_MOV64_IMM(BPF_REG_3
, 1),
5701 BPF_LD_IND(BPF_W
, BPF_REG_3
, -0x200000),
5702 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
5705 .errstr
= "R3 !read_ok",
5709 "ld_ind: check calling conv, r4",
5711 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5712 BPF_MOV64_IMM(BPF_REG_4
, 1),
5713 BPF_LD_IND(BPF_W
, BPF_REG_4
, -0x200000),
5714 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
5717 .errstr
= "R4 !read_ok",
5721 "ld_ind: check calling conv, r5",
5723 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5724 BPF_MOV64_IMM(BPF_REG_5
, 1),
5725 BPF_LD_IND(BPF_W
, BPF_REG_5
, -0x200000),
5726 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
5729 .errstr
= "R5 !read_ok",
5733 "ld_ind: check calling conv, r7",
5735 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5736 BPF_MOV64_IMM(BPF_REG_7
, 1),
5737 BPF_LD_IND(BPF_W
, BPF_REG_7
, -0x200000),
5738 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
5744 "check bpf_perf_event_data->sample_period byte load permitted",
5746 BPF_MOV64_IMM(BPF_REG_0
, 0),
5747 #if __BYTE_ORDER == __LITTLE_ENDIAN
5748 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
5749 offsetof(struct bpf_perf_event_data
, sample_period
)),
5751 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
5752 offsetof(struct bpf_perf_event_data
, sample_period
) + 7),
5757 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
5760 "check bpf_perf_event_data->sample_period half load permitted",
5762 BPF_MOV64_IMM(BPF_REG_0
, 0),
5763 #if __BYTE_ORDER == __LITTLE_ENDIAN
5764 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
5765 offsetof(struct bpf_perf_event_data
, sample_period
)),
5767 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
5768 offsetof(struct bpf_perf_event_data
, sample_period
) + 6),
5773 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
5776 "check bpf_perf_event_data->sample_period word load permitted",
5778 BPF_MOV64_IMM(BPF_REG_0
, 0),
5779 #if __BYTE_ORDER == __LITTLE_ENDIAN
5780 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
5781 offsetof(struct bpf_perf_event_data
, sample_period
)),
5783 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
5784 offsetof(struct bpf_perf_event_data
, sample_period
) + 4),
5789 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
5792 "check bpf_perf_event_data->sample_period dword load permitted",
5794 BPF_MOV64_IMM(BPF_REG_0
, 0),
5795 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
5796 offsetof(struct bpf_perf_event_data
, sample_period
)),
5800 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
5803 "check skb->data half load not permitted",
5805 BPF_MOV64_IMM(BPF_REG_0
, 0),
5806 #if __BYTE_ORDER == __LITTLE_ENDIAN
5807 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
5808 offsetof(struct __sk_buff
, data
)),
5810 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
5811 offsetof(struct __sk_buff
, data
) + 2),
5816 .errstr
= "invalid bpf_context access",
5819 "check skb->tc_classid half load not permitted for lwt prog",
5821 BPF_MOV64_IMM(BPF_REG_0
, 0),
5822 #if __BYTE_ORDER == __LITTLE_ENDIAN
5823 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
5824 offsetof(struct __sk_buff
, tc_classid
)),
5826 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
5827 offsetof(struct __sk_buff
, tc_classid
) + 2),
5832 .errstr
= "invalid bpf_context access",
5833 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
5836 "bounds checks mixing signed and unsigned, positive bounds",
5838 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5839 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5840 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5841 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5842 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5843 BPF_FUNC_map_lookup_elem
),
5844 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5845 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5846 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5847 BPF_MOV64_IMM(BPF_REG_2
, 2),
5848 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 3),
5849 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 4, 2),
5850 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
5851 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
5852 BPF_MOV64_IMM(BPF_REG_0
, 0),
5855 .fixup_map1
= { 3 },
5856 .errstr
= "R0 min value is negative",
5860 "bounds checks mixing signed and unsigned",
5862 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5863 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5864 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5865 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5866 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5867 BPF_FUNC_map_lookup_elem
),
5868 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5869 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5870 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5871 BPF_MOV64_IMM(BPF_REG_2
, -1),
5872 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
5873 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
5874 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
5875 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
5876 BPF_MOV64_IMM(BPF_REG_0
, 0),
5879 .fixup_map1
= { 3 },
5880 .errstr
= "R0 min value is negative",
5884 "bounds checks mixing signed and unsigned, variant 2",
5886 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5887 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5888 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5889 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5890 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5891 BPF_FUNC_map_lookup_elem
),
5892 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
5893 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5894 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5895 BPF_MOV64_IMM(BPF_REG_2
, -1),
5896 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
5897 BPF_MOV64_IMM(BPF_REG_8
, 0),
5898 BPF_ALU64_REG(BPF_ADD
, BPF_REG_8
, BPF_REG_1
),
5899 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
5900 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
5901 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
5902 BPF_MOV64_IMM(BPF_REG_0
, 0),
5905 .fixup_map1
= { 3 },
5906 .errstr
= "R8 invalid mem access 'inv'",
5910 "bounds checks mixing signed and unsigned, variant 3",
5912 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5913 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5914 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5915 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5916 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5917 BPF_FUNC_map_lookup_elem
),
5918 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
5919 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5920 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5921 BPF_MOV64_IMM(BPF_REG_2
, -1),
5922 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 4),
5923 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
5924 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
5925 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
5926 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
5927 BPF_MOV64_IMM(BPF_REG_0
, 0),
5930 .fixup_map1
= { 3 },
5931 .errstr
= "R8 invalid mem access 'inv'",
5935 "bounds checks mixing signed and unsigned, variant 4",
5937 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5938 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5939 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5940 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5941 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5942 BPF_FUNC_map_lookup_elem
),
5943 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5944 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5945 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5946 BPF_MOV64_IMM(BPF_REG_2
, 1),
5947 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
5948 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
5949 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
5950 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
5951 BPF_MOV64_IMM(BPF_REG_0
, 0),
5954 .fixup_map1
= { 3 },
5958 "bounds checks mixing signed and unsigned, variant 5",
5960 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5961 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5962 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5963 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5964 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5965 BPF_FUNC_map_lookup_elem
),
5966 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
5967 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5968 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5969 BPF_MOV64_IMM(BPF_REG_2
, -1),
5970 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
5971 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 4),
5972 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 4),
5973 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
5974 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
5975 BPF_MOV64_IMM(BPF_REG_0
, 0),
5978 .fixup_map1
= { 3 },
5979 .errstr
= "R0 min value is negative",
5983 "bounds checks mixing signed and unsigned, variant 6",
5985 BPF_MOV64_IMM(BPF_REG_2
, 0),
5986 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_10
),
5987 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, -512),
5988 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
5989 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -16),
5990 BPF_MOV64_IMM(BPF_REG_6
, -1),
5991 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_6
, 5),
5992 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_4
, 1, 4),
5993 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
5994 BPF_MOV64_IMM(BPF_REG_5
, 0),
5995 BPF_ST_MEM(BPF_H
, BPF_REG_10
, -512, 0),
5996 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5997 BPF_FUNC_skb_load_bytes
),
5998 BPF_MOV64_IMM(BPF_REG_0
, 0),
6001 .errstr
= "R4 min value is negative, either use unsigned",
6005 "bounds checks mixing signed and unsigned, variant 7",
6007 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6008 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6009 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6010 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6011 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6012 BPF_FUNC_map_lookup_elem
),
6013 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6014 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6015 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6016 BPF_MOV64_IMM(BPF_REG_2
, 1024 * 1024 * 1024),
6017 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6018 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6019 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6020 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6021 BPF_MOV64_IMM(BPF_REG_0
, 0),
6024 .fixup_map1
= { 3 },
6028 "bounds checks mixing signed and unsigned, variant 8",
6030 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6031 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6032 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6033 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6034 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6035 BPF_FUNC_map_lookup_elem
),
6036 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6037 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6038 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6039 BPF_MOV64_IMM(BPF_REG_2
, -1),
6040 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6041 BPF_MOV64_IMM(BPF_REG_0
, 0),
6043 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6044 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6045 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6046 BPF_MOV64_IMM(BPF_REG_0
, 0),
6049 .fixup_map1
= { 3 },
6050 .errstr
= "R0 min value is negative",
6054 "bounds checks mixing signed and unsigned, variant 9",
6056 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6057 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6058 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6059 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6060 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6061 BPF_FUNC_map_lookup_elem
),
6062 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
6063 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6064 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6065 BPF_LD_IMM64(BPF_REG_2
, -9223372036854775808ULL),
6066 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6067 BPF_MOV64_IMM(BPF_REG_0
, 0),
6069 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6070 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6071 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6072 BPF_MOV64_IMM(BPF_REG_0
, 0),
6075 .fixup_map1
= { 3 },
6079 "bounds checks mixing signed and unsigned, variant 10",
6081 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6082 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6083 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6084 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6085 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6086 BPF_FUNC_map_lookup_elem
),
6087 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6088 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6089 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6090 BPF_MOV64_IMM(BPF_REG_2
, 0),
6091 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6092 BPF_MOV64_IMM(BPF_REG_0
, 0),
6094 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6095 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6096 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6097 BPF_MOV64_IMM(BPF_REG_0
, 0),
6100 .fixup_map1
= { 3 },
6101 .errstr
= "R0 min value is negative",
6105 "bounds checks mixing signed and unsigned, variant 11",
6107 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6108 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6109 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6110 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6111 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6112 BPF_FUNC_map_lookup_elem
),
6113 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6114 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6115 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6116 BPF_MOV64_IMM(BPF_REG_2
, -1),
6117 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6119 BPF_MOV64_IMM(BPF_REG_0
, 0),
6121 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6122 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6123 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6124 BPF_MOV64_IMM(BPF_REG_0
, 0),
6127 .fixup_map1
= { 3 },
6128 .errstr
= "R0 min value is negative",
6132 "bounds checks mixing signed and unsigned, variant 12",
6134 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6135 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6136 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6137 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6138 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6139 BPF_FUNC_map_lookup_elem
),
6140 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6141 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6142 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6143 BPF_MOV64_IMM(BPF_REG_2
, -6),
6144 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6145 BPF_MOV64_IMM(BPF_REG_0
, 0),
6147 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6148 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6149 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6150 BPF_MOV64_IMM(BPF_REG_0
, 0),
6153 .fixup_map1
= { 3 },
6154 .errstr
= "R0 min value is negative",
6158 "bounds checks mixing signed and unsigned, variant 13",
6160 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6161 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6162 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6163 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6164 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6165 BPF_FUNC_map_lookup_elem
),
6166 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6167 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6168 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6169 BPF_MOV64_IMM(BPF_REG_2
, 2),
6170 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6171 BPF_MOV64_IMM(BPF_REG_7
, 1),
6172 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 0, 2),
6173 BPF_MOV64_IMM(BPF_REG_0
, 0),
6175 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_1
),
6176 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 4, 2),
6177 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_7
),
6178 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6179 BPF_MOV64_IMM(BPF_REG_0
, 0),
6182 .fixup_map1
= { 3 },
6183 .errstr
= "R0 min value is negative",
6187 "bounds checks mixing signed and unsigned, variant 14",
6189 BPF_LDX_MEM(BPF_W
, BPF_REG_9
, BPF_REG_1
,
6190 offsetof(struct __sk_buff
, mark
)),
6191 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6192 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6193 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6194 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6195 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6196 BPF_FUNC_map_lookup_elem
),
6197 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6198 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6199 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6200 BPF_MOV64_IMM(BPF_REG_2
, -1),
6201 BPF_MOV64_IMM(BPF_REG_8
, 2),
6202 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_9
, 42, 6),
6203 BPF_JMP_REG(BPF_JSGT
, BPF_REG_8
, BPF_REG_1
, 3),
6204 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6205 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6206 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6207 BPF_MOV64_IMM(BPF_REG_0
, 0),
6209 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, -3),
6210 BPF_JMP_IMM(BPF_JA
, 0, 0, -7),
6212 .fixup_map1
= { 4 },
6213 .errstr
= "R0 min value is negative",
6217 "bounds checks mixing signed and unsigned, variant 15",
6219 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6220 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6221 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6222 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6223 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6224 BPF_FUNC_map_lookup_elem
),
6225 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6226 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6227 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6228 BPF_MOV64_IMM(BPF_REG_2
, -6),
6229 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6230 BPF_MOV64_IMM(BPF_REG_0
, 0),
6232 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6233 BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, 1, 2),
6234 BPF_MOV64_IMM(BPF_REG_0
, 0),
6236 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6237 BPF_MOV64_IMM(BPF_REG_0
, 0),
6240 .fixup_map1
= { 3 },
6241 .errstr_unpriv
= "R0 pointer comparison prohibited",
6242 .errstr
= "R0 min value is negative",
6244 .result_unpriv
= REJECT
,
6247 "subtraction bounds (map value) variant 1",
6249 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6250 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6251 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6252 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6253 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6254 BPF_FUNC_map_lookup_elem
),
6255 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6256 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6257 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 7),
6258 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
6259 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 5),
6260 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
6261 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 56),
6262 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6263 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6265 BPF_MOV64_IMM(BPF_REG_0
, 0),
6268 .fixup_map1
= { 3 },
6269 .errstr
= "R0 max value is outside of the array range",
6273 "subtraction bounds (map value) variant 2",
6275 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6276 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6277 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6278 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6279 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6280 BPF_FUNC_map_lookup_elem
),
6281 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6282 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6283 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 6),
6284 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
6285 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 4),
6286 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
6287 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6288 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6290 BPF_MOV64_IMM(BPF_REG_0
, 0),
6293 .fixup_map1
= { 3 },
6294 .errstr
= "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6298 "variable-offset ctx access",
6300 /* Get an unknown value */
6301 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
6302 /* Make it small and 4-byte aligned */
6303 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
6304 /* add it to skb. We now have either &skb->len or
6305 * &skb->pkt_type, but we don't know which
6307 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
6308 /* dereference it */
6309 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
6312 .errstr
= "variable ctx access var_off=(0x0; 0x4)",
6314 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6317 "variable-offset stack access",
6319 /* Fill the top 8 bytes of the stack */
6320 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6321 /* Get an unknown value */
6322 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
6323 /* Make it small and 4-byte aligned */
6324 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
6325 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
6326 /* add it to fp. We now have either fp-4 or fp-8, but
6327 * we don't know which
6329 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
6330 /* dereference it */
6331 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_2
, 0),
6334 .errstr
= "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
6336 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6340 static int probe_filter_length(const struct bpf_insn
*fp
)
6344 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
6345 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
6350 static int create_map(uint32_t size_value
, uint32_t max_elem
)
6354 fd
= bpf_create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
6355 size_value
, max_elem
, BPF_F_NO_PREALLOC
);
6357 printf("Failed to create hash map '%s'!\n", strerror(errno
));
6362 static int create_prog_array(void)
6366 fd
= bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY
, sizeof(int),
6369 printf("Failed to create prog array '%s'!\n", strerror(errno
));
6374 static int create_map_in_map(void)
6376 int inner_map_fd
, outer_map_fd
;
6378 inner_map_fd
= bpf_create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
6380 if (inner_map_fd
< 0) {
6381 printf("Failed to create array '%s'!\n", strerror(errno
));
6382 return inner_map_fd
;
6385 outer_map_fd
= bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS
,
6386 sizeof(int), inner_map_fd
, 1, 0);
6387 if (outer_map_fd
< 0)
6388 printf("Failed to create array of maps '%s'!\n",
6391 close(inner_map_fd
);
6393 return outer_map_fd
;
/* Capture buffer for the kernel verifier's log output; each test's
 * expected errstr is matched against this, and it is dumped on failure.
 */
static char bpf_vlog[32768];
6398 static void do_test_fixup(struct bpf_test
*test
, struct bpf_insn
*prog
,
6401 int *fixup_map1
= test
->fixup_map1
;
6402 int *fixup_map2
= test
->fixup_map2
;
6403 int *fixup_prog
= test
->fixup_prog
;
6404 int *fixup_map_in_map
= test
->fixup_map_in_map
;
6406 /* Allocating HTs with 1 elem is fine here, since we only test
6407 * for verifier and not do a runtime lookup, so the only thing
6408 * that really matters is value size in this case.
6411 map_fds
[0] = create_map(sizeof(long long), 1);
6413 prog
[*fixup_map1
].imm
= map_fds
[0];
6415 } while (*fixup_map1
);
6419 map_fds
[1] = create_map(sizeof(struct test_val
), 1);
6421 prog
[*fixup_map2
].imm
= map_fds
[1];
6423 } while (*fixup_map2
);
6427 map_fds
[2] = create_prog_array();
6429 prog
[*fixup_prog
].imm
= map_fds
[2];
6431 } while (*fixup_prog
);
6434 if (*fixup_map_in_map
) {
6435 map_fds
[3] = create_map_in_map();
6437 prog
[*fixup_map_in_map
].imm
= map_fds
[3];
6439 } while (*fixup_map_in_map
);
6443 static void do_test_single(struct bpf_test
*test
, bool unpriv
,
6444 int *passes
, int *errors
)
6446 int fd_prog
, expected_ret
, reject_from_alignment
;
6447 struct bpf_insn
*prog
= test
->insns
;
6448 int prog_len
= probe_filter_length(prog
);
6449 int prog_type
= test
->prog_type
;
6450 int map_fds
[MAX_NR_MAPS
];
6451 const char *expected_err
;
6454 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
6457 do_test_fixup(test
, prog
, map_fds
);
6459 fd_prog
= bpf_verify_program(prog_type
? : BPF_PROG_TYPE_SOCKET_FILTER
,
6460 prog
, prog_len
, test
->flags
& F_LOAD_WITH_STRICT_ALIGNMENT
,
6461 "GPL", 0, bpf_vlog
, sizeof(bpf_vlog
), 1);
6463 expected_ret
= unpriv
&& test
->result_unpriv
!= UNDEF
?
6464 test
->result_unpriv
: test
->result
;
6465 expected_err
= unpriv
&& test
->errstr_unpriv
?
6466 test
->errstr_unpriv
: test
->errstr
;
6468 reject_from_alignment
= fd_prog
< 0 &&
6469 (test
->flags
& F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
) &&
6470 strstr(bpf_vlog
, "Unknown alignment.");
6471 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
6472 if (reject_from_alignment
) {
6473 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
6478 if (expected_ret
== ACCEPT
) {
6479 if (fd_prog
< 0 && !reject_from_alignment
) {
6480 printf("FAIL\nFailed to load prog '%s'!\n",
6486 printf("FAIL\nUnexpected success to load!\n");
6489 if (!strstr(bpf_vlog
, expected_err
) && !reject_from_alignment
) {
6490 printf("FAIL\nUnexpected error message!\n");
6496 printf("OK%s\n", reject_from_alignment
?
6497 " (NOTE: reject due to unknown alignment)" : "");
6500 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
6506 printf("%s", bpf_vlog
);
6510 static bool is_admin(void)
6513 cap_flag_value_t sysadmin
= CAP_CLEAR
;
6514 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
6516 #ifdef CAP_IS_SUPPORTED
6517 if (!CAP_IS_SUPPORTED(CAP_SETFCAP
)) {
6518 perror("cap_get_flag");
6522 caps
= cap_get_proc();
6524 perror("cap_get_proc");
6527 if (cap_get_flag(caps
, cap_val
, CAP_EFFECTIVE
, &sysadmin
))
6528 perror("cap_get_flag");
6531 return (sysadmin
== CAP_SET
);
6534 static int set_admin(bool admin
)
6537 const cap_value_t cap_val
= CAP_SYS_ADMIN
;
6540 caps
= cap_get_proc();
6542 perror("cap_get_proc");
6545 if (cap_set_flag(caps
, CAP_EFFECTIVE
, 1, &cap_val
,
6546 admin
? CAP_SET
: CAP_CLEAR
)) {
6547 perror("cap_set_flag");
6550 if (cap_set_proc(caps
)) {
6551 perror("cap_set_proc");
6561 static int do_test(bool unpriv
, unsigned int from
, unsigned int to
)
6563 int i
, passes
= 0, errors
= 0;
6565 for (i
= from
; i
< to
; i
++) {
6566 struct bpf_test
*test
= &tests
[i
];
6568 /* Program types that are not supported by non-root we
6571 if (!test
->prog_type
) {
6574 printf("#%d/u %s ", i
, test
->descr
);
6575 do_test_single(test
, true, &passes
, &errors
);
6581 printf("#%d/p %s ", i
, test
->descr
);
6582 do_test_single(test
, false, &passes
, &errors
);
6586 printf("Summary: %d PASSED, %d FAILED\n", passes
, errors
);
6587 return errors
? EXIT_FAILURE
: EXIT_SUCCESS
;
6590 int main(int argc
, char **argv
)
6592 struct rlimit rinf
= { RLIM_INFINITY
, RLIM_INFINITY
};
6593 struct rlimit rlim
= { 1 << 20, 1 << 20 };
6594 unsigned int from
= 0, to
= ARRAY_SIZE(tests
);
6595 bool unpriv
= !is_admin();
6598 unsigned int l
= atoi(argv
[argc
- 2]);
6599 unsigned int u
= atoi(argv
[argc
- 1]);
6601 if (l
< to
&& u
< to
) {
6605 } else if (argc
== 2) {
6606 unsigned int t
= atoi(argv
[argc
- 1]);
6614 setrlimit(RLIMIT_MEMLOCK
, unpriv
? &rlim
: &rinf
);
6615 return do_test(unpriv
, from
, to
);