/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
12 #include <asm/types.h>
13 #include <linux/types.h>
24 #include <sys/capability.h>
25 #include <sys/resource.h>
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
35 # include "autoconf.h"
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
42 #include "../../../include/linux/filter.h"
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
57 struct bpf_insn insns
[MAX_INSNS
];
58 int fixup_map1
[MAX_FIXUPS
];
59 int fixup_map2
[MAX_FIXUPS
];
60 int fixup_prog
[MAX_FIXUPS
];
61 int fixup_map_in_map
[MAX_FIXUPS
];
63 const char *errstr_unpriv
;
68 } result
, result_unpriv
;
69 enum bpf_prog_type prog_type
;
/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
76 #define MAX_ENTRIES 11
83 static struct bpf_test tests
[] = {
87 BPF_MOV64_IMM(BPF_REG_1
, 1),
88 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 2),
89 BPF_MOV64_IMM(BPF_REG_2
, 3),
90 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_2
),
91 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -1),
92 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 3),
93 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
104 .errstr
= "unreachable",
110 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
114 .errstr
= "unreachable",
120 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
123 .errstr
= "jump out of range",
127 "out of range jump2",
129 BPF_JMP_IMM(BPF_JA
, 0, 0, -2),
132 .errstr
= "jump out of range",
138 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0
, 0),
140 BPF_LD_IMM64(BPF_REG_0
, 0),
141 BPF_LD_IMM64(BPF_REG_0
, 1),
142 BPF_LD_IMM64(BPF_REG_0
, 1),
143 BPF_MOV64_IMM(BPF_REG_0
, 2),
146 .errstr
= "invalid BPF_LD_IMM insn",
147 .errstr_unpriv
= "R1 pointer comparison",
153 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0
, 0),
155 BPF_LD_IMM64(BPF_REG_0
, 0),
156 BPF_LD_IMM64(BPF_REG_0
, 1),
157 BPF_LD_IMM64(BPF_REG_0
, 1),
160 .errstr
= "invalid BPF_LD_IMM insn",
161 .errstr_unpriv
= "R1 pointer comparison",
167 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
168 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0
, 0),
170 BPF_LD_IMM64(BPF_REG_0
, 0),
171 BPF_LD_IMM64(BPF_REG_0
, 1),
172 BPF_LD_IMM64(BPF_REG_0
, 1),
175 .errstr
= "invalid bpf_ld_imm64 insn",
181 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
184 .errstr
= "invalid bpf_ld_imm64 insn",
190 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
192 .errstr
= "invalid bpf_ld_imm64 insn",
198 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
207 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
216 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
220 .errstr
= "uses reserved fields",
226 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
230 .errstr
= "invalid bpf_ld_imm64 insn",
236 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1
, 0, 0, 1),
240 .errstr
= "invalid bpf_ld_imm64 insn",
246 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
250 .errstr
= "invalid bpf_ld_imm64 insn",
256 BPF_MOV64_IMM(BPF_REG_1
, 0),
257 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
261 .errstr
= "not pointing to valid bpf_map",
267 BPF_MOV64_IMM(BPF_REG_1
, 0),
268 BPF_RAW_INSN(BPF_LD
| BPF_IMM
| BPF_DW
, 0, BPF_REG_1
, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1
, 0, 1),
272 .errstr
= "invalid bpf_ld_imm64 insn",
278 BPF_ALU64_REG(BPF_MOV
, BPF_REG_0
, BPF_REG_2
),
280 .errstr
= "jump out of range",
286 BPF_JMP_IMM(BPF_JA
, 0, 0, -1),
289 .errstr
= "back-edge",
295 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
296 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
297 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
298 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
301 .errstr
= "back-edge",
307 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
308 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_0
),
309 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_0
),
310 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, -3),
313 .errstr
= "back-edge",
317 "read uninitialized register",
319 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
322 .errstr
= "R2 !read_ok",
326 "read invalid register",
328 BPF_MOV64_REG(BPF_REG_0
, -1),
331 .errstr
= "R15 is invalid",
335 "program doesn't init R0 before exit",
337 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_1
),
340 .errstr
= "R0 !read_ok",
344 "program doesn't init R0 before exit in all branches",
346 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
347 BPF_MOV64_IMM(BPF_REG_0
, 1),
348 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
351 .errstr
= "R0 !read_ok",
352 .errstr_unpriv
= "R1 pointer comparison",
356 "stack out of bounds",
358 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, 8, 0),
361 .errstr
= "invalid stack",
365 "invalid call insn1",
367 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
| BPF_X
, 0, 0, 0, 0),
370 .errstr
= "BPF_CALL uses reserved",
374 "invalid call insn2",
376 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 1, 0),
379 .errstr
= "BPF_CALL uses reserved",
383 "invalid function call",
385 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, 1234567),
388 .errstr
= "invalid func unknown#1234567",
392 "uninitialized stack1",
394 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
395 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
396 BPF_LD_MAP_FD(BPF_REG_1
, 0),
397 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
398 BPF_FUNC_map_lookup_elem
),
402 .errstr
= "invalid indirect read from stack",
406 "uninitialized stack2",
408 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
409 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -8),
412 .errstr
= "invalid read from stack",
416 "invalid fp arithmetic",
417 /* If this gets ever changed, make sure JITs can deal with it. */
419 BPF_MOV64_IMM(BPF_REG_0
, 0),
420 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
421 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 8),
422 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
425 .errstr_unpriv
= "R1 subtraction from stack pointer",
426 .result_unpriv
= REJECT
,
427 .errstr
= "R1 invalid mem access",
431 "non-invalid fp arithmetic",
433 BPF_MOV64_IMM(BPF_REG_0
, 0),
434 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
440 "invalid argument register",
442 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid
),
444 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
445 BPF_FUNC_get_cgroup_classid
),
448 .errstr
= "R1 !read_ok",
450 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
453 "non-invalid argument register",
455 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
456 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
457 BPF_FUNC_get_cgroup_classid
),
458 BPF_ALU64_REG(BPF_MOV
, BPF_REG_1
, BPF_REG_6
),
459 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
460 BPF_FUNC_get_cgroup_classid
),
464 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
467 "check valid spill/fill",
469 /* spill R1(ctx) into stack */
470 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
471 /* fill it back into R2 */
472 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
473 /* should be able to access R0 = *(R2 + 8) */
474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
478 .errstr_unpriv
= "R0 leaks addr",
480 .result_unpriv
= REJECT
,
483 "check valid spill/fill, skb mark",
485 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_1
),
486 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, -8),
487 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
488 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
489 offsetof(struct __sk_buff
, mark
)),
493 .result_unpriv
= ACCEPT
,
496 "check corrupted spill/fill",
498 /* spill R1(ctx) into stack */
499 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
500 /* mess up with R1 pointer on stack */
501 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -7, 0x23),
502 /* fill back into R0 should fail */
503 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
506 .errstr_unpriv
= "attempt to corrupt spilled",
507 .errstr
= "corrupted spill",
511 "invalid src register in STX",
513 BPF_STX_MEM(BPF_B
, BPF_REG_10
, -1, -1),
516 .errstr
= "R15 is invalid",
520 "invalid dst register in STX",
522 BPF_STX_MEM(BPF_B
, 14, BPF_REG_10
, -1),
525 .errstr
= "R14 is invalid",
529 "invalid dst register in ST",
531 BPF_ST_MEM(BPF_B
, 14, -1, -1),
534 .errstr
= "R14 is invalid",
538 "invalid src register in LDX",
540 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, 12, 0),
543 .errstr
= "R12 is invalid",
547 "invalid dst register in LDX",
549 BPF_LDX_MEM(BPF_B
, 11, BPF_REG_1
, 0),
552 .errstr
= "R11 is invalid",
558 BPF_RAW_INSN(0, 0, 0, 0, 0),
561 .errstr
= "invalid BPF_LD_IMM",
567 BPF_RAW_INSN(1, 0, 0, 0, 0),
570 .errstr
= "BPF_LDX uses reserved fields",
576 BPF_RAW_INSN(-1, 0, 0, 0, 0),
579 .errstr
= "invalid BPF_ALU opcode f0",
585 BPF_RAW_INSN(-1, -1, -1, -1, -1),
588 .errstr
= "invalid BPF_ALU opcode f0",
594 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
597 .errstr
= "BPF_ALU uses reserved fields",
601 "misaligned read from stack",
603 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
604 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, -4),
607 .errstr
= "misaligned stack access",
611 "invalid map_fd for function call",
613 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
614 BPF_ALU64_REG(BPF_MOV
, BPF_REG_2
, BPF_REG_10
),
615 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
616 BPF_LD_MAP_FD(BPF_REG_1
, 0),
617 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
618 BPF_FUNC_map_delete_elem
),
621 .errstr
= "fd 0 is not pointing to valid bpf_map",
625 "don't check return value before access",
627 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
628 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
629 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
630 BPF_LD_MAP_FD(BPF_REG_1
, 0),
631 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
632 BPF_FUNC_map_lookup_elem
),
633 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
637 .errstr
= "R0 invalid mem access 'map_value_or_null'",
641 "access memory with incorrect alignment",
643 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
644 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
645 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
646 BPF_LD_MAP_FD(BPF_REG_1
, 0),
647 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
648 BPF_FUNC_map_lookup_elem
),
649 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
650 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 4, 0),
654 .errstr
= "misaligned value access",
656 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
659 "sometimes access memory with incorrect alignment",
661 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
662 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
663 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
664 BPF_LD_MAP_FD(BPF_REG_1
, 0),
665 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
666 BPF_FUNC_map_lookup_elem
),
667 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
668 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 0),
670 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 1),
674 .errstr
= "R0 invalid mem access",
675 .errstr_unpriv
= "R0 leaks addr",
677 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
682 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
683 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -8),
684 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 1),
685 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
686 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 1),
687 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 1),
688 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 1),
689 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 2),
690 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 1),
691 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 3),
692 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 1),
693 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 4),
694 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
695 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 5),
696 BPF_MOV64_IMM(BPF_REG_0
, 0),
699 .errstr_unpriv
= "R1 pointer comparison",
700 .result_unpriv
= REJECT
,
706 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
707 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 2),
708 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
709 BPF_JMP_IMM(BPF_JA
, 0, 0, 14),
710 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 2),
711 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
712 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
713 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 2),
714 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
715 BPF_JMP_IMM(BPF_JA
, 0, 0, 8),
716 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 2),
717 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
718 BPF_JMP_IMM(BPF_JA
, 0, 0, 5),
719 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 2),
720 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
721 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
722 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 1),
723 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
724 BPF_MOV64_IMM(BPF_REG_0
, 0),
727 .errstr_unpriv
= "R1 pointer comparison",
728 .result_unpriv
= REJECT
,
734 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
735 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
736 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -8, 0),
737 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
738 BPF_JMP_IMM(BPF_JA
, 0, 0, 19),
739 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 1, 3),
740 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -16, 0),
741 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
742 BPF_JMP_IMM(BPF_JA
, 0, 0, 15),
743 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 2, 3),
744 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -32, 0),
745 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -32),
746 BPF_JMP_IMM(BPF_JA
, 0, 0, 11),
747 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 3, 3),
748 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -40, 0),
749 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -40),
750 BPF_JMP_IMM(BPF_JA
, 0, 0, 7),
751 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 4, 3),
752 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -48, 0),
753 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -48),
754 BPF_JMP_IMM(BPF_JA
, 0, 0, 3),
755 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 5, 0),
756 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, -56, 0),
757 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -56),
758 BPF_LD_MAP_FD(BPF_REG_1
, 0),
759 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
760 BPF_FUNC_map_delete_elem
),
763 .fixup_map1
= { 24 },
764 .errstr_unpriv
= "R1 pointer comparison",
765 .result_unpriv
= REJECT
,
771 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
772 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
773 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
774 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
775 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
776 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
777 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
778 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
779 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
780 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
781 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
782 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
783 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
784 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
785 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
786 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
787 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
788 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
789 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
790 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
791 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
792 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
793 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
794 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
795 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
796 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
797 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
798 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
799 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
800 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
801 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
802 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
803 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 1),
804 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 2),
805 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 3),
806 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 4),
807 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
808 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
809 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
810 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
811 BPF_MOV64_IMM(BPF_REG_0
, 0),
814 .errstr_unpriv
= "R1 pointer comparison",
815 .result_unpriv
= REJECT
,
821 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
822 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
823 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
824 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
825 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
826 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
827 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
828 BPF_MOV64_IMM(BPF_REG_0
, 0),
829 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
830 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
831 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
832 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
833 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
834 BPF_MOV64_IMM(BPF_REG_0
, 0),
835 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
836 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
837 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
838 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
839 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
840 BPF_MOV64_IMM(BPF_REG_0
, 0),
841 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
842 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
843 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
844 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
845 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
846 BPF_MOV64_IMM(BPF_REG_0
, 0),
847 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
848 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_3
, -8),
849 BPF_JMP_IMM(BPF_JA
, 0, 0, 2),
850 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_2
, -8),
851 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
852 BPF_MOV64_IMM(BPF_REG_0
, 0),
855 .errstr_unpriv
= "R1 pointer comparison",
856 .result_unpriv
= REJECT
,
860 "access skb fields ok",
862 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
863 offsetof(struct __sk_buff
, len
)),
864 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
865 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
866 offsetof(struct __sk_buff
, mark
)),
867 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
868 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
869 offsetof(struct __sk_buff
, pkt_type
)),
870 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
871 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
872 offsetof(struct __sk_buff
, queue_mapping
)),
873 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
874 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
875 offsetof(struct __sk_buff
, protocol
)),
876 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
877 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
878 offsetof(struct __sk_buff
, vlan_present
)),
879 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
880 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
881 offsetof(struct __sk_buff
, vlan_tci
)),
882 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
883 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
884 offsetof(struct __sk_buff
, napi_id
)),
885 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 0),
891 "access skb fields bad1",
893 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -4),
896 .errstr
= "invalid bpf_context access",
900 "access skb fields bad2",
902 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 9),
903 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
904 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
905 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
906 BPF_LD_MAP_FD(BPF_REG_1
, 0),
907 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
908 BPF_FUNC_map_lookup_elem
),
909 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
911 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
912 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
913 offsetof(struct __sk_buff
, pkt_type
)),
917 .errstr
= "different pointers",
918 .errstr_unpriv
= "R1 pointer comparison",
922 "access skb fields bad3",
924 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 2),
925 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
926 offsetof(struct __sk_buff
, pkt_type
)),
928 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
929 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
930 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
931 BPF_LD_MAP_FD(BPF_REG_1
, 0),
932 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
933 BPF_FUNC_map_lookup_elem
),
934 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
936 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
937 BPF_JMP_IMM(BPF_JA
, 0, 0, -12),
940 .errstr
= "different pointers",
941 .errstr_unpriv
= "R1 pointer comparison",
945 "access skb fields bad4",
947 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, 0, 3),
948 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
949 offsetof(struct __sk_buff
, len
)),
950 BPF_MOV64_IMM(BPF_REG_0
, 0),
952 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
953 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
954 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
955 BPF_LD_MAP_FD(BPF_REG_1
, 0),
956 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
957 BPF_FUNC_map_lookup_elem
),
958 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
960 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
961 BPF_JMP_IMM(BPF_JA
, 0, 0, -13),
964 .errstr
= "different pointers",
965 .errstr_unpriv
= "R1 pointer comparison",
969 "invalid access __sk_buff family",
971 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
972 offsetof(struct __sk_buff
, family
)),
975 .errstr
= "invalid bpf_context access",
979 "invalid access __sk_buff remote_ip4",
981 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
982 offsetof(struct __sk_buff
, remote_ip4
)),
985 .errstr
= "invalid bpf_context access",
989 "invalid access __sk_buff local_ip4",
991 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
992 offsetof(struct __sk_buff
, local_ip4
)),
995 .errstr
= "invalid bpf_context access",
999 "invalid access __sk_buff remote_ip6",
1001 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1002 offsetof(struct __sk_buff
, remote_ip6
)),
1005 .errstr
= "invalid bpf_context access",
1009 "invalid access __sk_buff local_ip6",
1011 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1012 offsetof(struct __sk_buff
, local_ip6
)),
1015 .errstr
= "invalid bpf_context access",
1019 "invalid access __sk_buff remote_port",
1021 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1022 offsetof(struct __sk_buff
, remote_port
)),
1025 .errstr
= "invalid bpf_context access",
1029 "invalid access __sk_buff remote_port",
1031 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1032 offsetof(struct __sk_buff
, local_port
)),
1035 .errstr
= "invalid bpf_context access",
1039 "valid access __sk_buff family",
1041 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1042 offsetof(struct __sk_buff
, family
)),
1046 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1049 "valid access __sk_buff remote_ip4",
1051 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1052 offsetof(struct __sk_buff
, remote_ip4
)),
1056 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1059 "valid access __sk_buff local_ip4",
1061 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1062 offsetof(struct __sk_buff
, local_ip4
)),
1066 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1069 "valid access __sk_buff remote_ip6",
1071 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1072 offsetof(struct __sk_buff
, remote_ip6
[0])),
1073 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1074 offsetof(struct __sk_buff
, remote_ip6
[1])),
1075 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1076 offsetof(struct __sk_buff
, remote_ip6
[2])),
1077 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1078 offsetof(struct __sk_buff
, remote_ip6
[3])),
1082 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1085 "valid access __sk_buff local_ip6",
1087 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1088 offsetof(struct __sk_buff
, local_ip6
[0])),
1089 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1090 offsetof(struct __sk_buff
, local_ip6
[1])),
1091 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1092 offsetof(struct __sk_buff
, local_ip6
[2])),
1093 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1094 offsetof(struct __sk_buff
, local_ip6
[3])),
1098 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1101 "valid access __sk_buff remote_port",
1103 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1104 offsetof(struct __sk_buff
, remote_port
)),
1108 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1111 "valid access __sk_buff remote_port",
1113 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1114 offsetof(struct __sk_buff
, local_port
)),
1118 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1121 "invalid access of tc_classid for SK_SKB",
1123 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1124 offsetof(struct __sk_buff
, tc_classid
)),
1128 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1129 .errstr
= "invalid bpf_context access",
1132 "invalid access of skb->mark for SK_SKB",
1134 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1135 offsetof(struct __sk_buff
, mark
)),
1139 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1140 .errstr
= "invalid bpf_context access",
1143 "check skb->mark is not writeable by SK_SKB",
1145 BPF_MOV64_IMM(BPF_REG_0
, 0),
1146 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1147 offsetof(struct __sk_buff
, mark
)),
1151 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1152 .errstr
= "invalid bpf_context access",
1155 "check skb->tc_index is writeable by SK_SKB",
1157 BPF_MOV64_IMM(BPF_REG_0
, 0),
1158 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1159 offsetof(struct __sk_buff
, tc_index
)),
1163 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1166 "check skb->priority is writeable by SK_SKB",
1168 BPF_MOV64_IMM(BPF_REG_0
, 0),
1169 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1170 offsetof(struct __sk_buff
, priority
)),
1174 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1177 "direct packet read for SK_SKB",
1179 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1180 offsetof(struct __sk_buff
, data
)),
1181 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1182 offsetof(struct __sk_buff
, data_end
)),
1183 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1184 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1185 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1186 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
1187 BPF_MOV64_IMM(BPF_REG_0
, 0),
1191 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1194 "direct packet write for SK_SKB",
1196 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1197 offsetof(struct __sk_buff
, data
)),
1198 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1199 offsetof(struct __sk_buff
, data_end
)),
1200 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1201 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1202 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
1203 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
1204 BPF_MOV64_IMM(BPF_REG_0
, 0),
1208 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1211 "overlapping checks for direct packet access SK_SKB",
1213 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
1214 offsetof(struct __sk_buff
, data
)),
1215 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
1216 offsetof(struct __sk_buff
, data_end
)),
1217 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
1218 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
1219 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
1220 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
1221 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
1222 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
1223 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
1224 BPF_MOV64_IMM(BPF_REG_0
, 0),
1228 .prog_type
= BPF_PROG_TYPE_SK_SKB
,
1231 "check skb->mark is not writeable by sockets",
1233 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1234 offsetof(struct __sk_buff
, mark
)),
1237 .errstr
= "invalid bpf_context access",
1238 .errstr_unpriv
= "R1 leaks addr",
1242 "check skb->tc_index is not writeable by sockets",
1244 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1245 offsetof(struct __sk_buff
, tc_index
)),
1248 .errstr
= "invalid bpf_context access",
1249 .errstr_unpriv
= "R1 leaks addr",
1253 "check cb access: byte",
1255 BPF_MOV64_IMM(BPF_REG_0
, 0),
1256 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1257 offsetof(struct __sk_buff
, cb
[0])),
1258 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1259 offsetof(struct __sk_buff
, cb
[0]) + 1),
1260 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1261 offsetof(struct __sk_buff
, cb
[0]) + 2),
1262 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1263 offsetof(struct __sk_buff
, cb
[0]) + 3),
1264 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1265 offsetof(struct __sk_buff
, cb
[1])),
1266 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1267 offsetof(struct __sk_buff
, cb
[1]) + 1),
1268 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1269 offsetof(struct __sk_buff
, cb
[1]) + 2),
1270 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1271 offsetof(struct __sk_buff
, cb
[1]) + 3),
1272 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1273 offsetof(struct __sk_buff
, cb
[2])),
1274 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1275 offsetof(struct __sk_buff
, cb
[2]) + 1),
1276 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1277 offsetof(struct __sk_buff
, cb
[2]) + 2),
1278 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1279 offsetof(struct __sk_buff
, cb
[2]) + 3),
1280 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1281 offsetof(struct __sk_buff
, cb
[3])),
1282 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1283 offsetof(struct __sk_buff
, cb
[3]) + 1),
1284 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1285 offsetof(struct __sk_buff
, cb
[3]) + 2),
1286 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1287 offsetof(struct __sk_buff
, cb
[3]) + 3),
1288 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1289 offsetof(struct __sk_buff
, cb
[4])),
1290 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1291 offsetof(struct __sk_buff
, cb
[4]) + 1),
1292 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1293 offsetof(struct __sk_buff
, cb
[4]) + 2),
1294 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1295 offsetof(struct __sk_buff
, cb
[4]) + 3),
1296 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1297 offsetof(struct __sk_buff
, cb
[0])),
1298 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1299 offsetof(struct __sk_buff
, cb
[0]) + 1),
1300 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1301 offsetof(struct __sk_buff
, cb
[0]) + 2),
1302 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1303 offsetof(struct __sk_buff
, cb
[0]) + 3),
1304 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1305 offsetof(struct __sk_buff
, cb
[1])),
1306 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1307 offsetof(struct __sk_buff
, cb
[1]) + 1),
1308 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1309 offsetof(struct __sk_buff
, cb
[1]) + 2),
1310 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1311 offsetof(struct __sk_buff
, cb
[1]) + 3),
1312 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1313 offsetof(struct __sk_buff
, cb
[2])),
1314 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1315 offsetof(struct __sk_buff
, cb
[2]) + 1),
1316 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1317 offsetof(struct __sk_buff
, cb
[2]) + 2),
1318 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1319 offsetof(struct __sk_buff
, cb
[2]) + 3),
1320 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1321 offsetof(struct __sk_buff
, cb
[3])),
1322 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1323 offsetof(struct __sk_buff
, cb
[3]) + 1),
1324 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1325 offsetof(struct __sk_buff
, cb
[3]) + 2),
1326 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1327 offsetof(struct __sk_buff
, cb
[3]) + 3),
1328 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1329 offsetof(struct __sk_buff
, cb
[4])),
1330 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1331 offsetof(struct __sk_buff
, cb
[4]) + 1),
1332 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1333 offsetof(struct __sk_buff
, cb
[4]) + 2),
1334 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1335 offsetof(struct __sk_buff
, cb
[4]) + 3),
1341 "__sk_buff->hash, offset 0, byte store not permitted",
1343 BPF_MOV64_IMM(BPF_REG_0
, 0),
1344 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1345 offsetof(struct __sk_buff
, hash
)),
1348 .errstr
= "invalid bpf_context access",
1352 "__sk_buff->tc_index, offset 3, byte store not permitted",
1354 BPF_MOV64_IMM(BPF_REG_0
, 0),
1355 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1356 offsetof(struct __sk_buff
, tc_index
) + 3),
1359 .errstr
= "invalid bpf_context access",
1363 "check skb->hash byte load permitted",
1365 BPF_MOV64_IMM(BPF_REG_0
, 0),
1366 #if __BYTE_ORDER == __LITTLE_ENDIAN
1367 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1368 offsetof(struct __sk_buff
, hash
)),
1370 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1371 offsetof(struct __sk_buff
, hash
) + 3),
1378 "check skb->hash byte load not permitted 1",
1380 BPF_MOV64_IMM(BPF_REG_0
, 0),
1381 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1382 offsetof(struct __sk_buff
, hash
) + 1),
1385 .errstr
= "invalid bpf_context access",
1389 "check skb->hash byte load not permitted 2",
1391 BPF_MOV64_IMM(BPF_REG_0
, 0),
1392 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1393 offsetof(struct __sk_buff
, hash
) + 2),
1396 .errstr
= "invalid bpf_context access",
1400 "check skb->hash byte load not permitted 3",
1402 BPF_MOV64_IMM(BPF_REG_0
, 0),
1403 #if __BYTE_ORDER == __LITTLE_ENDIAN
1404 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1405 offsetof(struct __sk_buff
, hash
) + 3),
1407 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
1408 offsetof(struct __sk_buff
, hash
)),
1412 .errstr
= "invalid bpf_context access",
1416 "check cb access: byte, wrong type",
1418 BPF_MOV64_IMM(BPF_REG_0
, 0),
1419 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
,
1420 offsetof(struct __sk_buff
, cb
[0])),
1423 .errstr
= "invalid bpf_context access",
1425 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1428 "check cb access: half",
1430 BPF_MOV64_IMM(BPF_REG_0
, 0),
1431 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1432 offsetof(struct __sk_buff
, cb
[0])),
1433 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1434 offsetof(struct __sk_buff
, cb
[0]) + 2),
1435 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1436 offsetof(struct __sk_buff
, cb
[1])),
1437 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1438 offsetof(struct __sk_buff
, cb
[1]) + 2),
1439 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1440 offsetof(struct __sk_buff
, cb
[2])),
1441 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1442 offsetof(struct __sk_buff
, cb
[2]) + 2),
1443 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1444 offsetof(struct __sk_buff
, cb
[3])),
1445 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1446 offsetof(struct __sk_buff
, cb
[3]) + 2),
1447 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1448 offsetof(struct __sk_buff
, cb
[4])),
1449 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1450 offsetof(struct __sk_buff
, cb
[4]) + 2),
1451 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1452 offsetof(struct __sk_buff
, cb
[0])),
1453 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1454 offsetof(struct __sk_buff
, cb
[0]) + 2),
1455 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1456 offsetof(struct __sk_buff
, cb
[1])),
1457 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1458 offsetof(struct __sk_buff
, cb
[1]) + 2),
1459 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1460 offsetof(struct __sk_buff
, cb
[2])),
1461 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1462 offsetof(struct __sk_buff
, cb
[2]) + 2),
1463 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1464 offsetof(struct __sk_buff
, cb
[3])),
1465 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1466 offsetof(struct __sk_buff
, cb
[3]) + 2),
1467 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1468 offsetof(struct __sk_buff
, cb
[4])),
1469 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1470 offsetof(struct __sk_buff
, cb
[4]) + 2),
1476 "check cb access: half, unaligned",
1478 BPF_MOV64_IMM(BPF_REG_0
, 0),
1479 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1480 offsetof(struct __sk_buff
, cb
[0]) + 1),
1483 .errstr
= "misaligned context access",
1485 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1488 "check __sk_buff->hash, offset 0, half store not permitted",
1490 BPF_MOV64_IMM(BPF_REG_0
, 0),
1491 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1492 offsetof(struct __sk_buff
, hash
)),
1495 .errstr
= "invalid bpf_context access",
1499 "check __sk_buff->tc_index, offset 2, half store not permitted",
1501 BPF_MOV64_IMM(BPF_REG_0
, 0),
1502 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1503 offsetof(struct __sk_buff
, tc_index
) + 2),
1506 .errstr
= "invalid bpf_context access",
1510 "check skb->hash half load permitted",
1512 BPF_MOV64_IMM(BPF_REG_0
, 0),
1513 #if __BYTE_ORDER == __LITTLE_ENDIAN
1514 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1515 offsetof(struct __sk_buff
, hash
)),
1517 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1518 offsetof(struct __sk_buff
, hash
) + 2),
1525 "check skb->hash half load not permitted",
1527 BPF_MOV64_IMM(BPF_REG_0
, 0),
1528 #if __BYTE_ORDER == __LITTLE_ENDIAN
1529 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1530 offsetof(struct __sk_buff
, hash
) + 2),
1532 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
1533 offsetof(struct __sk_buff
, hash
)),
1537 .errstr
= "invalid bpf_context access",
1541 "check cb access: half, wrong type",
1543 BPF_MOV64_IMM(BPF_REG_0
, 0),
1544 BPF_STX_MEM(BPF_H
, BPF_REG_1
, BPF_REG_0
,
1545 offsetof(struct __sk_buff
, cb
[0])),
1548 .errstr
= "invalid bpf_context access",
1550 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1553 "check cb access: word",
1555 BPF_MOV64_IMM(BPF_REG_0
, 0),
1556 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1557 offsetof(struct __sk_buff
, cb
[0])),
1558 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1559 offsetof(struct __sk_buff
, cb
[1])),
1560 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1561 offsetof(struct __sk_buff
, cb
[2])),
1562 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1563 offsetof(struct __sk_buff
, cb
[3])),
1564 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1565 offsetof(struct __sk_buff
, cb
[4])),
1566 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1567 offsetof(struct __sk_buff
, cb
[0])),
1568 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1569 offsetof(struct __sk_buff
, cb
[1])),
1570 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1571 offsetof(struct __sk_buff
, cb
[2])),
1572 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1573 offsetof(struct __sk_buff
, cb
[3])),
1574 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1575 offsetof(struct __sk_buff
, cb
[4])),
1581 "check cb access: word, unaligned 1",
1583 BPF_MOV64_IMM(BPF_REG_0
, 0),
1584 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1585 offsetof(struct __sk_buff
, cb
[0]) + 2),
1588 .errstr
= "misaligned context access",
1590 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1593 "check cb access: word, unaligned 2",
1595 BPF_MOV64_IMM(BPF_REG_0
, 0),
1596 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1597 offsetof(struct __sk_buff
, cb
[4]) + 1),
1600 .errstr
= "misaligned context access",
1602 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1605 "check cb access: word, unaligned 3",
1607 BPF_MOV64_IMM(BPF_REG_0
, 0),
1608 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1609 offsetof(struct __sk_buff
, cb
[4]) + 2),
1612 .errstr
= "misaligned context access",
1614 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1617 "check cb access: word, unaligned 4",
1619 BPF_MOV64_IMM(BPF_REG_0
, 0),
1620 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1621 offsetof(struct __sk_buff
, cb
[4]) + 3),
1624 .errstr
= "misaligned context access",
1626 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1629 "check cb access: double",
1631 BPF_MOV64_IMM(BPF_REG_0
, 0),
1632 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1633 offsetof(struct __sk_buff
, cb
[0])),
1634 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1635 offsetof(struct __sk_buff
, cb
[2])),
1636 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1637 offsetof(struct __sk_buff
, cb
[0])),
1638 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1639 offsetof(struct __sk_buff
, cb
[2])),
1645 "check cb access: double, unaligned 1",
1647 BPF_MOV64_IMM(BPF_REG_0
, 0),
1648 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1649 offsetof(struct __sk_buff
, cb
[1])),
1652 .errstr
= "misaligned context access",
1654 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1657 "check cb access: double, unaligned 2",
1659 BPF_MOV64_IMM(BPF_REG_0
, 0),
1660 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1661 offsetof(struct __sk_buff
, cb
[3])),
1664 .errstr
= "misaligned context access",
1666 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
1669 "check cb access: double, oob 1",
1671 BPF_MOV64_IMM(BPF_REG_0
, 0),
1672 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1673 offsetof(struct __sk_buff
, cb
[4])),
1676 .errstr
= "invalid bpf_context access",
1680 "check cb access: double, oob 2",
1682 BPF_MOV64_IMM(BPF_REG_0
, 0),
1683 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1684 offsetof(struct __sk_buff
, cb
[4])),
1687 .errstr
= "invalid bpf_context access",
1691 "check __sk_buff->ifindex dw store not permitted",
1693 BPF_MOV64_IMM(BPF_REG_0
, 0),
1694 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1695 offsetof(struct __sk_buff
, ifindex
)),
1698 .errstr
= "invalid bpf_context access",
1702 "check __sk_buff->ifindex dw load not permitted",
1704 BPF_MOV64_IMM(BPF_REG_0
, 0),
1705 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
1706 offsetof(struct __sk_buff
, ifindex
)),
1709 .errstr
= "invalid bpf_context access",
1713 "check cb access: double, wrong type",
1715 BPF_MOV64_IMM(BPF_REG_0
, 0),
1716 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
1717 offsetof(struct __sk_buff
, cb
[0])),
1720 .errstr
= "invalid bpf_context access",
1722 .prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
,
1725 "check out of range skb->cb access",
1727 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1728 offsetof(struct __sk_buff
, cb
[0]) + 256),
1731 .errstr
= "invalid bpf_context access",
1732 .errstr_unpriv
= "",
1734 .prog_type
= BPF_PROG_TYPE_SCHED_ACT
,
1737 "write skb fields from socket prog",
1739 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1740 offsetof(struct __sk_buff
, cb
[4])),
1741 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1742 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1743 offsetof(struct __sk_buff
, mark
)),
1744 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1745 offsetof(struct __sk_buff
, tc_index
)),
1746 BPF_JMP_IMM(BPF_JGE
, BPF_REG_0
, 0, 1),
1747 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1748 offsetof(struct __sk_buff
, cb
[0])),
1749 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
1750 offsetof(struct __sk_buff
, cb
[2])),
1754 .errstr_unpriv
= "R1 leaks addr",
1755 .result_unpriv
= REJECT
,
1758 "write skb fields from tc_cls_act prog",
1760 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1761 offsetof(struct __sk_buff
, cb
[0])),
1762 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1763 offsetof(struct __sk_buff
, mark
)),
1764 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
1765 offsetof(struct __sk_buff
, tc_index
)),
1766 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1767 offsetof(struct __sk_buff
, tc_index
)),
1768 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
,
1769 offsetof(struct __sk_buff
, cb
[3])),
1772 .errstr_unpriv
= "",
1773 .result_unpriv
= REJECT
,
1775 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
1778 "PTR_TO_STACK store/load",
1780 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1781 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1782 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1783 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1789 "PTR_TO_STACK store/load - bad alignment on off",
1791 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1792 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1793 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 2, 0xfaceb00c),
1794 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 2),
1798 .errstr
= "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1801 "PTR_TO_STACK store/load - bad alignment on reg",
1803 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1804 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -10),
1805 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1806 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1810 .errstr
= "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 "PTR_TO_STACK store/load - out of bounds low",
1815 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1816 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -80000),
1817 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1818 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1822 .errstr
= "invalid stack off=-79992 size=8",
1825 "PTR_TO_STACK store/load - out of bounds high",
1827 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1828 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1829 BPF_ST_MEM(BPF_DW
, BPF_REG_1
, 8, 0xfaceb00c),
1830 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 8),
1834 .errstr
= "invalid stack off=0 size=8",
1837 "unpriv: return pointer",
1839 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
1843 .result_unpriv
= REJECT
,
1844 .errstr_unpriv
= "R0 leaks addr",
1847 "unpriv: add const to pointer",
1849 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
1850 BPF_MOV64_IMM(BPF_REG_0
, 0),
1856 "unpriv: add pointer to pointer",
1858 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
1859 BPF_MOV64_IMM(BPF_REG_0
, 0),
1863 .result_unpriv
= REJECT
,
1864 .errstr_unpriv
= "R1 pointer += pointer",
1867 "unpriv: neg pointer",
1869 BPF_ALU64_IMM(BPF_NEG
, BPF_REG_1
, 0),
1870 BPF_MOV64_IMM(BPF_REG_0
, 0),
1874 .result_unpriv
= REJECT
,
1875 .errstr_unpriv
= "R1 pointer arithmetic",
1878 "unpriv: cmp pointer with const",
1880 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
1881 BPF_MOV64_IMM(BPF_REG_0
, 0),
1885 .result_unpriv
= REJECT
,
1886 .errstr_unpriv
= "R1 pointer comparison",
1889 "unpriv: cmp pointer with pointer",
1891 BPF_JMP_REG(BPF_JEQ
, BPF_REG_1
, BPF_REG_10
, 0),
1892 BPF_MOV64_IMM(BPF_REG_0
, 0),
1896 .result_unpriv
= REJECT
,
1897 .errstr_unpriv
= "R10 pointer comparison",
1900 "unpriv: check that printk is disallowed",
1902 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1903 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
1904 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
1905 BPF_MOV64_IMM(BPF_REG_2
, 8),
1906 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
1907 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1908 BPF_FUNC_trace_printk
),
1909 BPF_MOV64_IMM(BPF_REG_0
, 0),
1912 .errstr_unpriv
= "unknown func bpf_trace_printk#6",
1913 .result_unpriv
= REJECT
,
1917 "unpriv: pass pointer to helper function",
1919 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
1920 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1921 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1922 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1923 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
1924 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
1925 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1926 BPF_FUNC_map_update_elem
),
1927 BPF_MOV64_IMM(BPF_REG_0
, 0),
1930 .fixup_map1
= { 3 },
1931 .errstr_unpriv
= "R4 leaks addr",
1932 .result_unpriv
= REJECT
,
1936 "unpriv: indirectly pass pointer on stack to helper function",
1938 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1939 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
1940 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
1941 BPF_LD_MAP_FD(BPF_REG_1
, 0),
1942 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
1943 BPF_FUNC_map_lookup_elem
),
1944 BPF_MOV64_IMM(BPF_REG_0
, 0),
1947 .fixup_map1
= { 3 },
1948 .errstr
= "invalid indirect read from stack off -8+0 size 8",
1952 "unpriv: mangle pointer on stack 1",
1954 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1955 BPF_ST_MEM(BPF_W
, BPF_REG_10
, -8, 0),
1956 BPF_MOV64_IMM(BPF_REG_0
, 0),
1959 .errstr_unpriv
= "attempt to corrupt spilled",
1960 .result_unpriv
= REJECT
,
1964 "unpriv: mangle pointer on stack 2",
1966 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1967 BPF_ST_MEM(BPF_B
, BPF_REG_10
, -1, 0),
1968 BPF_MOV64_IMM(BPF_REG_0
, 0),
1971 .errstr_unpriv
= "attempt to corrupt spilled",
1972 .result_unpriv
= REJECT
,
1976 "unpriv: read pointer from stack in small chunks",
1978 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_10
, -8),
1979 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_10
, -8),
1980 BPF_MOV64_IMM(BPF_REG_0
, 0),
1983 .errstr
= "invalid size",
1987 "unpriv: write pointer into ctx",
1989 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
, 0),
1990 BPF_MOV64_IMM(BPF_REG_0
, 0),
1993 .errstr_unpriv
= "R1 leaks addr",
1994 .result_unpriv
= REJECT
,
1995 .errstr
= "invalid bpf_context access",
1999 "unpriv: spill/fill of ctx",
2001 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2002 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2003 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2004 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2005 BPF_MOV64_IMM(BPF_REG_0
, 0),
2011 "unpriv: spill/fill of ctx 2",
2013 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2014 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2015 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2016 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2017 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2018 BPF_FUNC_get_hash_recalc
),
2022 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2025 "unpriv: spill/fill of ctx 3",
2027 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2028 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2029 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2030 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2031 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2032 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2033 BPF_FUNC_get_hash_recalc
),
2037 .errstr
= "R1 type=fp expected=ctx",
2038 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2041 "unpriv: spill/fill of ctx 4",
2043 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2044 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2045 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2046 BPF_MOV64_IMM(BPF_REG_0
, 1),
2047 BPF_RAW_INSN(BPF_STX
| BPF_XADD
| BPF_DW
, BPF_REG_10
,
2049 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2050 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2051 BPF_FUNC_get_hash_recalc
),
2055 .errstr
= "R1 type=inv expected=ctx",
2056 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2059 "unpriv: spill/fill of different pointers stx",
2061 BPF_MOV64_IMM(BPF_REG_3
, 42),
2062 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2063 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2064 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2065 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2066 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -16),
2067 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2068 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2069 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2070 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2071 BPF_STX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_3
,
2072 offsetof(struct __sk_buff
, mark
)),
2073 BPF_MOV64_IMM(BPF_REG_0
, 0),
2077 .errstr
= "same insn cannot be used with different pointers",
2078 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2081 "unpriv: spill/fill of different pointers ldx",
2083 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2084 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2085 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 3),
2086 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2087 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
,
2088 -(__s32
)offsetof(struct bpf_perf_event_data
,
2089 sample_period
) - 8),
2090 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_2
, 0),
2091 BPF_JMP_IMM(BPF_JNE
, BPF_REG_1
, 0, 1),
2092 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2093 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_6
, 0),
2094 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_1
,
2095 offsetof(struct bpf_perf_event_data
,
2097 BPF_MOV64_IMM(BPF_REG_0
, 0),
2101 .errstr
= "same insn cannot be used with different pointers",
2102 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
2105 "unpriv: write pointer into map elem value",
2107 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
2108 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2109 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2110 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2111 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2112 BPF_FUNC_map_lookup_elem
),
2113 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
2114 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
2117 .fixup_map1
= { 3 },
2118 .errstr_unpriv
= "R0 leaks addr",
2119 .result_unpriv
= REJECT
,
2123 "unpriv: partial copy of pointer",
2125 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_10
),
2126 BPF_MOV64_IMM(BPF_REG_0
, 0),
2129 .errstr_unpriv
= "R10 partial copy",
2130 .result_unpriv
= REJECT
,
2134 "unpriv: pass pointer to tail_call",
2136 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_1
),
2137 BPF_LD_MAP_FD(BPF_REG_2
, 0),
2138 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2139 BPF_FUNC_tail_call
),
2140 BPF_MOV64_IMM(BPF_REG_0
, 0),
2143 .fixup_prog
= { 1 },
2144 .errstr_unpriv
= "R3 leaks addr into helper",
2145 .result_unpriv
= REJECT
,
2149 "unpriv: cmp map pointer with zero",
2151 BPF_MOV64_IMM(BPF_REG_1
, 0),
2152 BPF_LD_MAP_FD(BPF_REG_1
, 0),
2153 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_1
, 0, 0),
2154 BPF_MOV64_IMM(BPF_REG_0
, 0),
2157 .fixup_map1
= { 1 },
2158 .errstr_unpriv
= "R1 pointer comparison",
2159 .result_unpriv
= REJECT
,
2163 "unpriv: write into frame pointer",
2165 BPF_MOV64_REG(BPF_REG_10
, BPF_REG_1
),
2166 BPF_MOV64_IMM(BPF_REG_0
, 0),
2169 .errstr
= "frame pointer is read only",
2173 "unpriv: spill/fill frame pointer",
2175 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2176 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2177 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_10
, 0),
2178 BPF_LDX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_6
, 0),
2179 BPF_MOV64_IMM(BPF_REG_0
, 0),
2182 .errstr
= "frame pointer is read only",
2186 "unpriv: cmp of frame pointer",
2188 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_10
, 0, 0),
2189 BPF_MOV64_IMM(BPF_REG_0
, 0),
2192 .errstr_unpriv
= "R10 pointer comparison",
2193 .result_unpriv
= REJECT
,
2197 "unpriv: adding of fp",
2199 BPF_MOV64_IMM(BPF_REG_0
, 0),
2200 BPF_MOV64_IMM(BPF_REG_1
, 0),
2201 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_10
),
2202 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, -8),
2208 "unpriv: cmp of stack pointer",
2210 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
2211 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
2212 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_2
, 0, 0),
2213 BPF_MOV64_IMM(BPF_REG_0
, 0),
2216 .errstr_unpriv
= "R2 pointer comparison",
2217 .result_unpriv
= REJECT
,
2221 "stack pointer arithmetic",
2223 BPF_MOV64_IMM(BPF_REG_1
, 4),
2224 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
2225 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_10
),
2226 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2227 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, -10),
2228 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2229 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_1
),
2230 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2231 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
2232 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2233 BPF_ST_MEM(0, BPF_REG_2
, 4, 0),
2234 BPF_MOV64_IMM(BPF_REG_0
, 0),
2240 "raw_stack: no skb_load_bytes",
2242 BPF_MOV64_IMM(BPF_REG_2
, 4),
2243 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2244 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2245 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2246 BPF_MOV64_IMM(BPF_REG_4
, 8),
2247 /* Call to skb_load_bytes() omitted. */
2248 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2252 .errstr
= "invalid read from stack off -8+0 size 8",
2253 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2256 "raw_stack: skb_load_bytes, negative len",
2258 BPF_MOV64_IMM(BPF_REG_2
, 4),
2259 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2260 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2261 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2262 BPF_MOV64_IMM(BPF_REG_4
, -8),
2263 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2264 BPF_FUNC_skb_load_bytes
),
2265 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2269 .errstr
= "R4 min value is negative",
2270 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2273 "raw_stack: skb_load_bytes, negative len 2",
2275 BPF_MOV64_IMM(BPF_REG_2
, 4),
2276 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2277 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2278 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2279 BPF_MOV64_IMM(BPF_REG_4
, ~0),
2280 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2281 BPF_FUNC_skb_load_bytes
),
2282 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2286 .errstr
= "R4 min value is negative",
2287 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2290 "raw_stack: skb_load_bytes, zero len",
2292 BPF_MOV64_IMM(BPF_REG_2
, 4),
2293 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2294 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2295 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2296 BPF_MOV64_IMM(BPF_REG_4
, 0),
2297 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2298 BPF_FUNC_skb_load_bytes
),
2299 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2303 .errstr
= "invalid stack type R3",
2304 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2307 "raw_stack: skb_load_bytes, no init",
2309 BPF_MOV64_IMM(BPF_REG_2
, 4),
2310 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2311 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2312 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2313 BPF_MOV64_IMM(BPF_REG_4
, 8),
2314 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2315 BPF_FUNC_skb_load_bytes
),
2316 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2320 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2323 "raw_stack: skb_load_bytes, init",
2325 BPF_MOV64_IMM(BPF_REG_2
, 4),
2326 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2327 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2328 BPF_ST_MEM(BPF_DW
, BPF_REG_6
, 0, 0xcafe),
2329 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2330 BPF_MOV64_IMM(BPF_REG_4
, 8),
2331 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2332 BPF_FUNC_skb_load_bytes
),
2333 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2337 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2340 "raw_stack: skb_load_bytes, spilled regs around bounds",
2342 BPF_MOV64_IMM(BPF_REG_2
, 4),
2343 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2344 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2345 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2346 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2347 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2348 BPF_MOV64_IMM(BPF_REG_4
, 8),
2349 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2350 BPF_FUNC_skb_load_bytes
),
2351 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2352 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2353 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2354 offsetof(struct __sk_buff
, mark
)),
2355 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2356 offsetof(struct __sk_buff
, priority
)),
2357 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2361 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2364 "raw_stack: skb_load_bytes, spilled regs corruption",
2366 BPF_MOV64_IMM(BPF_REG_2
, 4),
2367 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2368 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -8),
2369 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2370 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2371 BPF_MOV64_IMM(BPF_REG_4
, 8),
2372 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2373 BPF_FUNC_skb_load_bytes
),
2374 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2375 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2376 offsetof(struct __sk_buff
, mark
)),
2380 .errstr
= "R0 invalid mem access 'inv'",
2381 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2384 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2386 BPF_MOV64_IMM(BPF_REG_2
, 4),
2387 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2388 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2389 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2390 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2391 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2392 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2393 BPF_MOV64_IMM(BPF_REG_4
, 8),
2394 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2395 BPF_FUNC_skb_load_bytes
),
2396 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2397 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2398 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2399 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2400 offsetof(struct __sk_buff
, mark
)),
2401 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2402 offsetof(struct __sk_buff
, priority
)),
2403 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2404 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_3
,
2405 offsetof(struct __sk_buff
, pkt_type
)),
2406 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2410 .errstr
= "R3 invalid mem access 'inv'",
2411 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2414 "raw_stack: skb_load_bytes, spilled regs + data",
2416 BPF_MOV64_IMM(BPF_REG_2
, 4),
2417 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2418 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -16),
2419 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, -8),
2420 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 0),
2421 BPF_STX_MEM(BPF_DW
, BPF_REG_6
, BPF_REG_1
, 8),
2422 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2423 BPF_MOV64_IMM(BPF_REG_4
, 8),
2424 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2425 BPF_FUNC_skb_load_bytes
),
2426 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, -8),
2427 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_6
, 8),
2428 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_6
, 0),
2429 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
2430 offsetof(struct __sk_buff
, mark
)),
2431 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_2
,
2432 offsetof(struct __sk_buff
, priority
)),
2433 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2434 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
2438 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2441 "raw_stack: skb_load_bytes, invalid access 1",
2443 BPF_MOV64_IMM(BPF_REG_2
, 4),
2444 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2445 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -513),
2446 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2447 BPF_MOV64_IMM(BPF_REG_4
, 8),
2448 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2449 BPF_FUNC_skb_load_bytes
),
2450 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2454 .errstr
= "invalid stack type R3 off=-513 access_size=8",
2455 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2458 "raw_stack: skb_load_bytes, invalid access 2",
2460 BPF_MOV64_IMM(BPF_REG_2
, 4),
2461 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2462 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2463 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2464 BPF_MOV64_IMM(BPF_REG_4
, 8),
2465 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2466 BPF_FUNC_skb_load_bytes
),
2467 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2471 .errstr
= "invalid stack type R3 off=-1 access_size=8",
2472 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2475 "raw_stack: skb_load_bytes, invalid access 3",
2477 BPF_MOV64_IMM(BPF_REG_2
, 4),
2478 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2479 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 0xffffffff),
2480 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2481 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2482 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2483 BPF_FUNC_skb_load_bytes
),
2484 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2488 .errstr
= "R4 min value is negative",
2489 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2492 "raw_stack: skb_load_bytes, invalid access 4",
2494 BPF_MOV64_IMM(BPF_REG_2
, 4),
2495 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2496 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -1),
2497 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2498 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2499 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2500 BPF_FUNC_skb_load_bytes
),
2501 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2505 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2506 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2509 "raw_stack: skb_load_bytes, invalid access 5",
2511 BPF_MOV64_IMM(BPF_REG_2
, 4),
2512 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2513 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2514 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2515 BPF_MOV64_IMM(BPF_REG_4
, 0x7fffffff),
2516 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2517 BPF_FUNC_skb_load_bytes
),
2518 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2522 .errstr
= "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2523 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2526 "raw_stack: skb_load_bytes, invalid access 6",
2528 BPF_MOV64_IMM(BPF_REG_2
, 4),
2529 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2530 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2531 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2532 BPF_MOV64_IMM(BPF_REG_4
, 0),
2533 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2534 BPF_FUNC_skb_load_bytes
),
2535 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2539 .errstr
= "invalid stack type R3 off=-512 access_size=0",
2540 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2543 "raw_stack: skb_load_bytes, large access",
2545 BPF_MOV64_IMM(BPF_REG_2
, 4),
2546 BPF_ALU64_REG(BPF_MOV
, BPF_REG_6
, BPF_REG_10
),
2547 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, -512),
2548 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
2549 BPF_MOV64_IMM(BPF_REG_4
, 512),
2550 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
2551 BPF_FUNC_skb_load_bytes
),
2552 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
2556 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2559 "direct packet access: test1",
2561 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2562 offsetof(struct __sk_buff
, data
)),
2563 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2564 offsetof(struct __sk_buff
, data_end
)),
2565 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2566 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2567 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2568 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2569 BPF_MOV64_IMM(BPF_REG_0
, 0),
2573 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2576 "direct packet access: test2",
2578 BPF_MOV64_IMM(BPF_REG_0
, 1),
2579 BPF_LDX_MEM(BPF_W
, BPF_REG_4
, BPF_REG_1
,
2580 offsetof(struct __sk_buff
, data_end
)),
2581 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2582 offsetof(struct __sk_buff
, data
)),
2583 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2584 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 14),
2585 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_4
, 15),
2586 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_3
, 7),
2587 BPF_LDX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_3
, 12),
2588 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_4
, 14),
2589 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2590 offsetof(struct __sk_buff
, data
)),
2591 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_4
),
2592 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_1
),
2593 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 49),
2594 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 49),
2595 BPF_ALU64_REG(BPF_ADD
, BPF_REG_3
, BPF_REG_2
),
2596 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_3
),
2597 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 8),
2598 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_1
,
2599 offsetof(struct __sk_buff
, data_end
)),
2600 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
2601 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_3
, 4),
2602 BPF_MOV64_IMM(BPF_REG_0
, 0),
2606 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2609 "direct packet access: test3",
2611 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2612 offsetof(struct __sk_buff
, data
)),
2613 BPF_MOV64_IMM(BPF_REG_0
, 0),
2616 .errstr
= "invalid bpf_context access off=76",
2618 .prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
,
2621 "direct packet access: test4 (write)",
2623 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2624 offsetof(struct __sk_buff
, data
)),
2625 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2626 offsetof(struct __sk_buff
, data_end
)),
2627 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2628 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2629 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2630 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2631 BPF_MOV64_IMM(BPF_REG_0
, 0),
2635 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2638 "direct packet access: test5 (pkt_end >= reg, good access)",
2640 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2641 offsetof(struct __sk_buff
, data
)),
2642 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2643 offsetof(struct __sk_buff
, data_end
)),
2644 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2645 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2646 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2647 BPF_MOV64_IMM(BPF_REG_0
, 1),
2649 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2650 BPF_MOV64_IMM(BPF_REG_0
, 0),
2654 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2657 "direct packet access: test6 (pkt_end >= reg, bad access)",
2659 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2660 offsetof(struct __sk_buff
, data
)),
2661 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2662 offsetof(struct __sk_buff
, data_end
)),
2663 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2664 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2665 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2666 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2667 BPF_MOV64_IMM(BPF_REG_0
, 1),
2669 BPF_MOV64_IMM(BPF_REG_0
, 0),
2672 .errstr
= "invalid access to packet",
2674 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2677 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2679 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2680 offsetof(struct __sk_buff
, data
)),
2681 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2682 offsetof(struct __sk_buff
, data_end
)),
2683 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2684 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2685 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 3),
2686 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2687 BPF_MOV64_IMM(BPF_REG_0
, 1),
2689 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2690 BPF_MOV64_IMM(BPF_REG_0
, 0),
2693 .errstr
= "invalid access to packet",
2695 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2698 "direct packet access: test8 (double test, variant 1)",
2700 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2701 offsetof(struct __sk_buff
, data
)),
2702 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2703 offsetof(struct __sk_buff
, data_end
)),
2704 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2705 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2706 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 4),
2707 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2708 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2709 BPF_MOV64_IMM(BPF_REG_0
, 1),
2711 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2712 BPF_MOV64_IMM(BPF_REG_0
, 0),
2716 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2719 "direct packet access: test9 (double test, variant 2)",
2721 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2722 offsetof(struct __sk_buff
, data
)),
2723 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2724 offsetof(struct __sk_buff
, data_end
)),
2725 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2726 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2727 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_0
, 2),
2728 BPF_MOV64_IMM(BPF_REG_0
, 1),
2730 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2731 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2732 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
2733 BPF_MOV64_IMM(BPF_REG_0
, 0),
2737 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2740 "direct packet access: test10 (write invalid)",
2742 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2743 offsetof(struct __sk_buff
, data
)),
2744 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2745 offsetof(struct __sk_buff
, data_end
)),
2746 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2747 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2748 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
2749 BPF_MOV64_IMM(BPF_REG_0
, 0),
2751 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2752 BPF_MOV64_IMM(BPF_REG_0
, 0),
2755 .errstr
= "invalid access to packet",
2757 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2760 "direct packet access: test11 (shift, good access)",
2762 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2763 offsetof(struct __sk_buff
, data
)),
2764 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2765 offsetof(struct __sk_buff
, data_end
)),
2766 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2767 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2768 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2769 BPF_MOV64_IMM(BPF_REG_3
, 144),
2770 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2771 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2772 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 3),
2773 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2774 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2775 BPF_MOV64_IMM(BPF_REG_0
, 1),
2777 BPF_MOV64_IMM(BPF_REG_0
, 0),
2781 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2784 "direct packet access: test12 (and, good access)",
2786 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2787 offsetof(struct __sk_buff
, data
)),
2788 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2789 offsetof(struct __sk_buff
, data_end
)),
2790 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2791 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2792 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2793 BPF_MOV64_IMM(BPF_REG_3
, 144),
2794 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2795 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2796 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
2797 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2798 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2799 BPF_MOV64_IMM(BPF_REG_0
, 1),
2801 BPF_MOV64_IMM(BPF_REG_0
, 0),
2805 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2808 "direct packet access: test13 (branches, good access)",
2810 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2811 offsetof(struct __sk_buff
, data
)),
2812 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2813 offsetof(struct __sk_buff
, data_end
)),
2814 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2815 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2816 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 13),
2817 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2818 offsetof(struct __sk_buff
, mark
)),
2819 BPF_MOV64_IMM(BPF_REG_4
, 1),
2820 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_4
, 2),
2821 BPF_MOV64_IMM(BPF_REG_3
, 14),
2822 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
2823 BPF_MOV64_IMM(BPF_REG_3
, 24),
2824 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_3
),
2825 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 23),
2826 BPF_ALU64_IMM(BPF_AND
, BPF_REG_5
, 15),
2827 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2828 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2829 BPF_MOV64_IMM(BPF_REG_0
, 1),
2831 BPF_MOV64_IMM(BPF_REG_0
, 0),
2835 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2838 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2840 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2841 offsetof(struct __sk_buff
, data
)),
2842 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2843 offsetof(struct __sk_buff
, data_end
)),
2844 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2845 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 22),
2846 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 7),
2847 BPF_MOV64_IMM(BPF_REG_5
, 12),
2848 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_5
, 4),
2849 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_2
),
2850 BPF_ALU64_REG(BPF_ADD
, BPF_REG_6
, BPF_REG_5
),
2851 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_6
, 0),
2852 BPF_MOV64_IMM(BPF_REG_0
, 1),
2854 BPF_MOV64_IMM(BPF_REG_0
, 0),
2858 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2861 "direct packet access: test15 (spill with xadd)",
2863 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2864 offsetof(struct __sk_buff
, data
)),
2865 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2866 offsetof(struct __sk_buff
, data_end
)),
2867 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2868 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2869 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 8),
2870 BPF_MOV64_IMM(BPF_REG_5
, 4096),
2871 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_10
),
2872 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -8),
2873 BPF_STX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_2
, 0),
2874 BPF_STX_XADD(BPF_DW
, BPF_REG_4
, BPF_REG_5
, 0),
2875 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_4
, 0),
2876 BPF_STX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_5
, 0),
2877 BPF_MOV64_IMM(BPF_REG_0
, 0),
2880 .errstr
= "R2 invalid mem access 'inv'",
2882 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2885 "direct packet access: test16 (arith on data_end)",
2887 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2888 offsetof(struct __sk_buff
, data
)),
2889 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2890 offsetof(struct __sk_buff
, data_end
)),
2891 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2892 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2893 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 16),
2894 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2895 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2896 BPF_MOV64_IMM(BPF_REG_0
, 0),
2899 .errstr
= "invalid access to packet",
2901 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2904 "direct packet access: test17 (pruning, alignment)",
2906 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2907 offsetof(struct __sk_buff
, data
)),
2908 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2909 offsetof(struct __sk_buff
, data_end
)),
2910 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
2911 offsetof(struct __sk_buff
, mark
)),
2912 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2913 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 14),
2914 BPF_JMP_IMM(BPF_JGT
, BPF_REG_7
, 1, 4),
2915 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2916 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
, -4),
2917 BPF_MOV64_IMM(BPF_REG_0
, 0),
2919 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 1),
2922 .errstr
= "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2924 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2925 .flags
= F_LOAD_WITH_STRICT_ALIGNMENT
,
2928 "direct packet access: test18 (imm += pkt_ptr, 1)",
2930 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2931 offsetof(struct __sk_buff
, data
)),
2932 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2933 offsetof(struct __sk_buff
, data_end
)),
2934 BPF_MOV64_IMM(BPF_REG_0
, 8),
2935 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
2936 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
2937 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
2938 BPF_MOV64_IMM(BPF_REG_0
, 0),
2942 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2945 "direct packet access: test19 (imm += pkt_ptr, 2)",
2947 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2948 offsetof(struct __sk_buff
, data
)),
2949 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2950 offsetof(struct __sk_buff
, data_end
)),
2951 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2952 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2953 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 3),
2954 BPF_MOV64_IMM(BPF_REG_4
, 4),
2955 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2956 BPF_STX_MEM(BPF_B
, BPF_REG_4
, BPF_REG_4
, 0),
2957 BPF_MOV64_IMM(BPF_REG_0
, 0),
2961 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2964 "direct packet access: test20 (x += pkt_ptr, 1)",
2966 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2967 offsetof(struct __sk_buff
, data
)),
2968 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2969 offsetof(struct __sk_buff
, data_end
)),
2970 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
2971 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
2972 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
2973 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0x7fff),
2974 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
2975 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
2976 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
2977 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
2978 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
2979 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
2980 BPF_MOV64_IMM(BPF_REG_0
, 0),
2983 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
2987 "direct packet access: test21 (x += pkt_ptr, 2)",
2989 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
2990 offsetof(struct __sk_buff
, data
)),
2991 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
2992 offsetof(struct __sk_buff
, data_end
)),
2993 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
2994 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
2995 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 9),
2996 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
2997 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
2998 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
2999 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, 0x7fff),
3000 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3001 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3002 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 0x7fff - 1),
3003 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 1),
3004 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_4
, 0),
3005 BPF_MOV64_IMM(BPF_REG_0
, 0),
3008 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3012 "direct packet access: test22 (x += pkt_ptr, 3)",
3014 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3015 offsetof(struct __sk_buff
, data
)),
3016 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3017 offsetof(struct __sk_buff
, data_end
)),
3018 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3019 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3020 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -8),
3021 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_3
, -16),
3022 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_10
, -16),
3023 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 11),
3024 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -8),
3025 BPF_MOV64_IMM(BPF_REG_4
, 0xffffffff),
3026 BPF_STX_XADD(BPF_DW
, BPF_REG_10
, BPF_REG_4
, -8),
3027 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -8),
3028 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_4
, 49),
3029 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_2
),
3030 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
3031 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 2),
3032 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 2),
3033 BPF_MOV64_IMM(BPF_REG_2
, 1),
3034 BPF_STX_MEM(BPF_H
, BPF_REG_4
, BPF_REG_2
, 0),
3035 BPF_MOV64_IMM(BPF_REG_0
, 0),
3038 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3042 "direct packet access: test23 (x += pkt_ptr, 4)",
3044 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3045 offsetof(struct __sk_buff
, data
)),
3046 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3047 offsetof(struct __sk_buff
, data_end
)),
3048 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3049 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3050 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3051 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xffff),
3052 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3053 BPF_MOV64_IMM(BPF_REG_0
, 31),
3054 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3055 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3056 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3057 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0xffff - 1),
3058 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3059 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3060 BPF_MOV64_IMM(BPF_REG_0
, 0),
3063 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3065 .errstr
= "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3068 "direct packet access: test24 (x += pkt_ptr, 5)",
3070 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3071 offsetof(struct __sk_buff
, data
)),
3072 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3073 offsetof(struct __sk_buff
, data_end
)),
3074 BPF_MOV64_IMM(BPF_REG_0
, 0xffffffff),
3075 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
3076 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_10
, -8),
3077 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 0xff),
3078 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3079 BPF_MOV64_IMM(BPF_REG_0
, 64),
3080 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_4
),
3081 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
3082 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_0
),
3083 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7fff - 1),
3084 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
3085 BPF_STX_MEM(BPF_DW
, BPF_REG_5
, BPF_REG_0
, 0),
3086 BPF_MOV64_IMM(BPF_REG_0
, 0),
3089 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3093 "direct packet access: test25 (marking on <, good access)",
3095 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3096 offsetof(struct __sk_buff
, data
)),
3097 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3098 offsetof(struct __sk_buff
, data_end
)),
3099 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3100 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3101 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 2),
3102 BPF_MOV64_IMM(BPF_REG_0
, 0),
3104 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3105 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3108 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3111 "direct packet access: test26 (marking on <, bad access)",
3113 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3114 offsetof(struct __sk_buff
, data
)),
3115 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3116 offsetof(struct __sk_buff
, data_end
)),
3117 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3118 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3119 BPF_JMP_REG(BPF_JLT
, BPF_REG_0
, BPF_REG_3
, 3),
3120 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3121 BPF_MOV64_IMM(BPF_REG_0
, 0),
3123 BPF_JMP_IMM(BPF_JA
, 0, 0, -3),
3126 .errstr
= "invalid access to packet",
3127 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3130 "direct packet access: test27 (marking on <=, good access)",
3132 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3133 offsetof(struct __sk_buff
, data
)),
3134 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3135 offsetof(struct __sk_buff
, data_end
)),
3136 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3137 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3138 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 1),
3139 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3140 BPF_MOV64_IMM(BPF_REG_0
, 1),
3144 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3147 "direct packet access: test28 (marking on <=, bad access)",
3149 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3150 offsetof(struct __sk_buff
, data
)),
3151 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3152 offsetof(struct __sk_buff
, data_end
)),
3153 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
3154 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
3155 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_0
, 2),
3156 BPF_MOV64_IMM(BPF_REG_0
, 1),
3158 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
3159 BPF_JMP_IMM(BPF_JA
, 0, 0, -4),
3162 .errstr
= "invalid access to packet",
3163 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3166 "helper access to packet: test1, valid packet_ptr range",
3168 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3169 offsetof(struct xdp_md
, data
)),
3170 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3171 offsetof(struct xdp_md
, data_end
)),
3172 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3173 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3174 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3175 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3176 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3177 BPF_MOV64_IMM(BPF_REG_4
, 0),
3178 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3179 BPF_FUNC_map_update_elem
),
3180 BPF_MOV64_IMM(BPF_REG_0
, 0),
3183 .fixup_map1
= { 5 },
3184 .result_unpriv
= ACCEPT
,
3186 .prog_type
= BPF_PROG_TYPE_XDP
,
3189 "helper access to packet: test2, unchecked packet_ptr",
3191 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3192 offsetof(struct xdp_md
, data
)),
3193 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3194 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3195 BPF_FUNC_map_lookup_elem
),
3196 BPF_MOV64_IMM(BPF_REG_0
, 0),
3199 .fixup_map1
= { 1 },
3201 .errstr
= "invalid access to packet",
3202 .prog_type
= BPF_PROG_TYPE_XDP
,
3205 "helper access to packet: test3, variable add",
3207 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3208 offsetof(struct xdp_md
, data
)),
3209 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3210 offsetof(struct xdp_md
, data_end
)),
3211 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3212 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3213 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3214 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3215 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3216 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3217 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3218 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3219 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3220 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3221 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3222 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3223 BPF_FUNC_map_lookup_elem
),
3224 BPF_MOV64_IMM(BPF_REG_0
, 0),
3227 .fixup_map1
= { 11 },
3229 .prog_type
= BPF_PROG_TYPE_XDP
,
3232 "helper access to packet: test4, packet_ptr with bad range",
3234 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3235 offsetof(struct xdp_md
, data
)),
3236 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3237 offsetof(struct xdp_md
, data_end
)),
3238 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3239 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3240 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3241 BPF_MOV64_IMM(BPF_REG_0
, 0),
3243 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3244 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3245 BPF_FUNC_map_lookup_elem
),
3246 BPF_MOV64_IMM(BPF_REG_0
, 0),
3249 .fixup_map1
= { 7 },
3251 .errstr
= "invalid access to packet",
3252 .prog_type
= BPF_PROG_TYPE_XDP
,
3255 "helper access to packet: test5, packet_ptr with too short range",
3257 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3258 offsetof(struct xdp_md
, data
)),
3259 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3260 offsetof(struct xdp_md
, data_end
)),
3261 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3262 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3263 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3264 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3265 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3266 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3267 BPF_FUNC_map_lookup_elem
),
3268 BPF_MOV64_IMM(BPF_REG_0
, 0),
3271 .fixup_map1
= { 6 },
3273 .errstr
= "invalid access to packet",
3274 .prog_type
= BPF_PROG_TYPE_XDP
,
3277 "helper access to packet: test6, cls valid packet_ptr range",
3279 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3280 offsetof(struct __sk_buff
, data
)),
3281 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3282 offsetof(struct __sk_buff
, data_end
)),
3283 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
3284 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
3285 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 5),
3286 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3287 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_2
),
3288 BPF_MOV64_IMM(BPF_REG_4
, 0),
3289 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3290 BPF_FUNC_map_update_elem
),
3291 BPF_MOV64_IMM(BPF_REG_0
, 0),
3294 .fixup_map1
= { 5 },
3296 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3299 "helper access to packet: test7, cls unchecked packet_ptr",
3301 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3302 offsetof(struct __sk_buff
, data
)),
3303 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3304 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3305 BPF_FUNC_map_lookup_elem
),
3306 BPF_MOV64_IMM(BPF_REG_0
, 0),
3309 .fixup_map1
= { 1 },
3311 .errstr
= "invalid access to packet",
3312 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3315 "helper access to packet: test8, cls variable add",
3317 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3318 offsetof(struct __sk_buff
, data
)),
3319 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3320 offsetof(struct __sk_buff
, data_end
)),
3321 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3322 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 8),
3323 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 10),
3324 BPF_LDX_MEM(BPF_B
, BPF_REG_5
, BPF_REG_2
, 0),
3325 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3326 BPF_ALU64_REG(BPF_ADD
, BPF_REG_4
, BPF_REG_5
),
3327 BPF_MOV64_REG(BPF_REG_5
, BPF_REG_4
),
3328 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_5
, 8),
3329 BPF_JMP_REG(BPF_JGT
, BPF_REG_5
, BPF_REG_3
, 4),
3330 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3331 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_4
),
3332 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3333 BPF_FUNC_map_lookup_elem
),
3334 BPF_MOV64_IMM(BPF_REG_0
, 0),
3337 .fixup_map1
= { 11 },
3339 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3342 "helper access to packet: test9, cls packet_ptr with bad range",
3344 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3345 offsetof(struct __sk_buff
, data
)),
3346 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3347 offsetof(struct __sk_buff
, data_end
)),
3348 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3349 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 4),
3350 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 2),
3351 BPF_MOV64_IMM(BPF_REG_0
, 0),
3353 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3354 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3355 BPF_FUNC_map_lookup_elem
),
3356 BPF_MOV64_IMM(BPF_REG_0
, 0),
3359 .fixup_map1
= { 7 },
3361 .errstr
= "invalid access to packet",
3362 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3365 "helper access to packet: test10, cls packet_ptr with too short range",
3367 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
3368 offsetof(struct __sk_buff
, data
)),
3369 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
3370 offsetof(struct __sk_buff
, data_end
)),
3371 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
3372 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_2
),
3373 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 7),
3374 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_3
, 3),
3375 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3376 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3377 BPF_FUNC_map_lookup_elem
),
3378 BPF_MOV64_IMM(BPF_REG_0
, 0),
3381 .fixup_map1
= { 6 },
3383 .errstr
= "invalid access to packet",
3384 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3387 "helper access to packet: test11, cls unsuitable helper 1",
3389 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3390 offsetof(struct __sk_buff
, data
)),
3391 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3392 offsetof(struct __sk_buff
, data_end
)),
3393 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3394 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3395 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 7),
3396 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_7
, 4),
3397 BPF_MOV64_IMM(BPF_REG_2
, 0),
3398 BPF_MOV64_IMM(BPF_REG_4
, 42),
3399 BPF_MOV64_IMM(BPF_REG_5
, 0),
3400 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3401 BPF_FUNC_skb_store_bytes
),
3402 BPF_MOV64_IMM(BPF_REG_0
, 0),
3406 .errstr
= "helper access to the packet",
3407 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3410 "helper access to packet: test12, cls unsuitable helper 2",
3412 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3413 offsetof(struct __sk_buff
, data
)),
3414 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3415 offsetof(struct __sk_buff
, data_end
)),
3416 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_6
),
3417 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 8),
3418 BPF_JMP_REG(BPF_JGT
, BPF_REG_6
, BPF_REG_7
, 3),
3419 BPF_MOV64_IMM(BPF_REG_2
, 0),
3420 BPF_MOV64_IMM(BPF_REG_4
, 4),
3421 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3422 BPF_FUNC_skb_load_bytes
),
3423 BPF_MOV64_IMM(BPF_REG_0
, 0),
3427 .errstr
= "helper access to the packet",
3428 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3431 "helper access to packet: test13, cls helper ok",
3433 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3434 offsetof(struct __sk_buff
, data
)),
3435 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3436 offsetof(struct __sk_buff
, data_end
)),
3437 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3438 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3439 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3440 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3441 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3442 BPF_MOV64_IMM(BPF_REG_2
, 4),
3443 BPF_MOV64_IMM(BPF_REG_3
, 0),
3444 BPF_MOV64_IMM(BPF_REG_4
, 0),
3445 BPF_MOV64_IMM(BPF_REG_5
, 0),
3446 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3447 BPF_FUNC_csum_diff
),
3448 BPF_MOV64_IMM(BPF_REG_0
, 0),
3452 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3455 "helper access to packet: test14, cls helper ok sub",
3457 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3458 offsetof(struct __sk_buff
, data
)),
3459 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3460 offsetof(struct __sk_buff
, data_end
)),
3461 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3462 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3463 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3464 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3465 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 4),
3466 BPF_MOV64_IMM(BPF_REG_2
, 4),
3467 BPF_MOV64_IMM(BPF_REG_3
, 0),
3468 BPF_MOV64_IMM(BPF_REG_4
, 0),
3469 BPF_MOV64_IMM(BPF_REG_5
, 0),
3470 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3471 BPF_FUNC_csum_diff
),
3472 BPF_MOV64_IMM(BPF_REG_0
, 0),
3476 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3479 "helper access to packet: test15, cls helper fail sub",
3481 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3482 offsetof(struct __sk_buff
, data
)),
3483 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3484 offsetof(struct __sk_buff
, data_end
)),
3485 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3486 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3487 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3488 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3489 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 12),
3490 BPF_MOV64_IMM(BPF_REG_2
, 4),
3491 BPF_MOV64_IMM(BPF_REG_3
, 0),
3492 BPF_MOV64_IMM(BPF_REG_4
, 0),
3493 BPF_MOV64_IMM(BPF_REG_5
, 0),
3494 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3495 BPF_FUNC_csum_diff
),
3496 BPF_MOV64_IMM(BPF_REG_0
, 0),
3500 .errstr
= "invalid access to packet",
3501 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3504 "helper access to packet: test16, cls helper fail range 1",
3506 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3507 offsetof(struct __sk_buff
, data
)),
3508 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3509 offsetof(struct __sk_buff
, data_end
)),
3510 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3511 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3512 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3513 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3514 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3515 BPF_MOV64_IMM(BPF_REG_2
, 8),
3516 BPF_MOV64_IMM(BPF_REG_3
, 0),
3517 BPF_MOV64_IMM(BPF_REG_4
, 0),
3518 BPF_MOV64_IMM(BPF_REG_5
, 0),
3519 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3520 BPF_FUNC_csum_diff
),
3521 BPF_MOV64_IMM(BPF_REG_0
, 0),
3525 .errstr
= "invalid access to packet",
3526 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3529 "helper access to packet: test17, cls helper fail range 2",
3531 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3532 offsetof(struct __sk_buff
, data
)),
3533 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3534 offsetof(struct __sk_buff
, data_end
)),
3535 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3536 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3537 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3538 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3539 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3540 BPF_MOV64_IMM(BPF_REG_2
, -9),
3541 BPF_MOV64_IMM(BPF_REG_3
, 0),
3542 BPF_MOV64_IMM(BPF_REG_4
, 0),
3543 BPF_MOV64_IMM(BPF_REG_5
, 0),
3544 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3545 BPF_FUNC_csum_diff
),
3546 BPF_MOV64_IMM(BPF_REG_0
, 0),
3550 .errstr
= "R2 min value is negative",
3551 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3554 "helper access to packet: test18, cls helper fail range 3",
3556 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3557 offsetof(struct __sk_buff
, data
)),
3558 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3559 offsetof(struct __sk_buff
, data_end
)),
3560 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3561 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3562 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3563 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3564 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3565 BPF_MOV64_IMM(BPF_REG_2
, ~0),
3566 BPF_MOV64_IMM(BPF_REG_3
, 0),
3567 BPF_MOV64_IMM(BPF_REG_4
, 0),
3568 BPF_MOV64_IMM(BPF_REG_5
, 0),
3569 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3570 BPF_FUNC_csum_diff
),
3571 BPF_MOV64_IMM(BPF_REG_0
, 0),
3575 .errstr
= "R2 min value is negative",
3576 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3579 "helper access to packet: test19, cls helper fail range zero",
3581 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3582 offsetof(struct __sk_buff
, data
)),
3583 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3584 offsetof(struct __sk_buff
, data_end
)),
3585 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3586 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3587 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3588 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3589 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3590 BPF_MOV64_IMM(BPF_REG_2
, 0),
3591 BPF_MOV64_IMM(BPF_REG_3
, 0),
3592 BPF_MOV64_IMM(BPF_REG_4
, 0),
3593 BPF_MOV64_IMM(BPF_REG_5
, 0),
3594 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3595 BPF_FUNC_csum_diff
),
3596 BPF_MOV64_IMM(BPF_REG_0
, 0),
3600 .errstr
= "invalid access to packet",
3601 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3604 "helper access to packet: test20, pkt end as input",
3606 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3607 offsetof(struct __sk_buff
, data
)),
3608 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3609 offsetof(struct __sk_buff
, data_end
)),
3610 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3611 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3612 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3613 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3614 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_7
),
3615 BPF_MOV64_IMM(BPF_REG_2
, 4),
3616 BPF_MOV64_IMM(BPF_REG_3
, 0),
3617 BPF_MOV64_IMM(BPF_REG_4
, 0),
3618 BPF_MOV64_IMM(BPF_REG_5
, 0),
3619 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3620 BPF_FUNC_csum_diff
),
3621 BPF_MOV64_IMM(BPF_REG_0
, 0),
3625 .errstr
= "R1 type=pkt_end expected=fp",
3626 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3629 "helper access to packet: test21, wrong reg",
3631 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
3632 offsetof(struct __sk_buff
, data
)),
3633 BPF_LDX_MEM(BPF_W
, BPF_REG_7
, BPF_REG_1
,
3634 offsetof(struct __sk_buff
, data_end
)),
3635 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, 1),
3636 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_6
),
3637 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 7),
3638 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_7
, 6),
3639 BPF_MOV64_IMM(BPF_REG_2
, 4),
3640 BPF_MOV64_IMM(BPF_REG_3
, 0),
3641 BPF_MOV64_IMM(BPF_REG_4
, 0),
3642 BPF_MOV64_IMM(BPF_REG_5
, 0),
3643 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3644 BPF_FUNC_csum_diff
),
3645 BPF_MOV64_IMM(BPF_REG_0
, 0),
3649 .errstr
= "invalid access to packet",
3650 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
3653 "valid map access into an array with a constant",
3655 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3656 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3657 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3658 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3659 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3660 BPF_FUNC_map_lookup_elem
),
3661 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3662 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3663 offsetof(struct test_val
, foo
)),
3666 .fixup_map2
= { 3 },
3667 .errstr_unpriv
= "R0 leaks addr",
3668 .result_unpriv
= REJECT
,
3672 "valid map access into an array with a register",
3674 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3675 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3676 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3677 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3678 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3679 BPF_FUNC_map_lookup_elem
),
3680 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3681 BPF_MOV64_IMM(BPF_REG_1
, 4),
3682 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3683 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3684 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3685 offsetof(struct test_val
, foo
)),
3688 .fixup_map2
= { 3 },
3689 .errstr_unpriv
= "R0 leaks addr",
3690 .result_unpriv
= REJECT
,
3692 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3695 "valid map access into an array with a variable",
3697 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3698 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3699 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3700 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3701 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3702 BPF_FUNC_map_lookup_elem
),
3703 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
3704 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3705 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 3),
3706 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3707 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3708 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3709 offsetof(struct test_val
, foo
)),
3712 .fixup_map2
= { 3 },
3713 .errstr_unpriv
= "R0 leaks addr",
3714 .result_unpriv
= REJECT
,
3716 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3719 "valid map access into an array with a signed variable",
3721 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3722 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3723 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3724 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3725 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3726 BPF_FUNC_map_lookup_elem
),
3727 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
3728 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3729 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 0xffffffff, 1),
3730 BPF_MOV32_IMM(BPF_REG_1
, 0),
3731 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
3732 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
3733 BPF_MOV32_IMM(BPF_REG_1
, 0),
3734 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3735 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3736 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3737 offsetof(struct test_val
, foo
)),
3740 .fixup_map2
= { 3 },
3741 .errstr_unpriv
= "R0 leaks addr",
3742 .result_unpriv
= REJECT
,
3744 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3747 "invalid map access into an array with a constant",
3749 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3750 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3751 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3752 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3753 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3754 BPF_FUNC_map_lookup_elem
),
3755 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3756 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, (MAX_ENTRIES
+ 1) << 2,
3757 offsetof(struct test_val
, foo
)),
3760 .fixup_map2
= { 3 },
3761 .errstr
= "invalid access to map value, value_size=48 off=48 size=8",
3765 "invalid map access into an array with a register",
3767 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3768 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3770 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3771 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3772 BPF_FUNC_map_lookup_elem
),
3773 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3774 BPF_MOV64_IMM(BPF_REG_1
, MAX_ENTRIES
+ 1),
3775 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3776 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3777 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3778 offsetof(struct test_val
, foo
)),
3781 .fixup_map2
= { 3 },
3782 .errstr
= "R0 min value is outside of the array range",
3784 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3787 "invalid map access into an array with a variable",
3789 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3790 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3791 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3792 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3793 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3794 BPF_FUNC_map_lookup_elem
),
3795 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
3796 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3797 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
3798 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3799 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3800 offsetof(struct test_val
, foo
)),
3803 .fixup_map2
= { 3 },
3804 .errstr
= "R0 unbounded memory access, make sure to bounds check any array access into a map",
3806 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3809 "invalid map access into an array with no floor check",
3811 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3812 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3813 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3814 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3815 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3816 BPF_FUNC_map_lookup_elem
),
3817 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
3818 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
3819 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
3820 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
3821 BPF_MOV32_IMM(BPF_REG_1
, 0),
3822 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3823 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3824 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3825 offsetof(struct test_val
, foo
)),
3828 .fixup_map2
= { 3 },
3829 .errstr_unpriv
= "R0 leaks addr",
3830 .errstr
= "R0 unbounded memory access",
3831 .result_unpriv
= REJECT
,
3833 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3836 "invalid map access into an array with a invalid max check",
3838 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3839 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3840 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3841 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3842 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3843 BPF_FUNC_map_lookup_elem
),
3844 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
3845 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
3846 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
+ 1),
3847 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 1),
3848 BPF_MOV32_IMM(BPF_REG_1
, 0),
3849 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
3850 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
3851 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
3852 offsetof(struct test_val
, foo
)),
3855 .fixup_map2
= { 3 },
3856 .errstr_unpriv
= "R0 leaks addr",
3857 .errstr
= "invalid access to map value, value_size=48 off=44 size=8",
3858 .result_unpriv
= REJECT
,
3860 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3863 "invalid map access into an array with a invalid max check",
3865 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3866 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3867 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3868 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3869 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3870 BPF_FUNC_map_lookup_elem
),
3871 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
3872 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
3873 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
3874 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3875 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3876 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3877 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3878 BPF_FUNC_map_lookup_elem
),
3879 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
3880 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
3881 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_0
,
3882 offsetof(struct test_val
, foo
)),
3885 .fixup_map2
= { 3, 11 },
3886 .errstr_unpriv
= "R0 pointer += pointer",
3887 .errstr
= "R0 invalid mem access 'inv'",
3888 .result_unpriv
= REJECT
,
3890 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
3893 "multiple registers share map_lookup_elem result",
3895 BPF_MOV64_IMM(BPF_REG_1
, 10),
3896 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3897 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3898 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3899 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3900 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3901 BPF_FUNC_map_lookup_elem
),
3902 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3903 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3904 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3907 .fixup_map1
= { 4 },
3909 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3912 "alu ops on ptr_to_map_value_or_null, 1",
3914 BPF_MOV64_IMM(BPF_REG_1
, 10),
3915 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3916 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3917 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3918 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3919 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3920 BPF_FUNC_map_lookup_elem
),
3921 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3922 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, -2),
3923 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 2),
3924 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3925 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3928 .fixup_map1
= { 4 },
3929 .errstr
= "R4 invalid mem access",
3931 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3934 "alu ops on ptr_to_map_value_or_null, 2",
3936 BPF_MOV64_IMM(BPF_REG_1
, 10),
3937 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3938 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3939 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3940 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3941 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3942 BPF_FUNC_map_lookup_elem
),
3943 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3944 BPF_ALU64_IMM(BPF_AND
, BPF_REG_4
, -1),
3945 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3946 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3949 .fixup_map1
= { 4 },
3950 .errstr
= "R4 invalid mem access",
3952 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3955 "alu ops on ptr_to_map_value_or_null, 3",
3957 BPF_MOV64_IMM(BPF_REG_1
, 10),
3958 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3959 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3960 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3961 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3962 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3963 BPF_FUNC_map_lookup_elem
),
3964 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3965 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_4
, 1),
3966 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3967 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3970 .fixup_map1
= { 4 },
3971 .errstr
= "R4 invalid mem access",
3973 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
3976 "invalid memory access with multiple map_lookup_elem calls",
3978 BPF_MOV64_IMM(BPF_REG_1
, 10),
3979 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
3980 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
3981 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
3982 BPF_LD_MAP_FD(BPF_REG_1
, 0),
3983 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
3984 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
3985 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3986 BPF_FUNC_map_lookup_elem
),
3987 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
3988 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
3989 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
3990 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
3991 BPF_FUNC_map_lookup_elem
),
3992 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
3993 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
3996 .fixup_map1
= { 4 },
3998 .errstr
= "R4 !read_ok",
3999 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4002 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4004 BPF_MOV64_IMM(BPF_REG_1
, 10),
4005 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_1
, -8),
4006 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4007 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4008 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4009 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
4010 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_2
),
4011 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4012 BPF_FUNC_map_lookup_elem
),
4013 BPF_MOV64_IMM(BPF_REG_2
, 10),
4014 BPF_JMP_IMM(BPF_JNE
, BPF_REG_2
, 0, 3),
4015 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_8
),
4016 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_7
),
4017 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4018 BPF_FUNC_map_lookup_elem
),
4019 BPF_MOV64_REG(BPF_REG_4
, BPF_REG_0
),
4020 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
4021 BPF_ST_MEM(BPF_DW
, BPF_REG_4
, 0, 0),
4024 .fixup_map1
= { 4 },
4026 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
4029 "invalid map access from else condition",
4031 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4032 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4033 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4034 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4035 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0, BPF_FUNC_map_lookup_elem
),
4036 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4037 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
4038 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
-1, 1),
4039 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 1),
4040 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
4041 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
4042 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, offsetof(struct test_val
, foo
)),
4045 .fixup_map2
= { 3 },
4046 .errstr
= "R0 unbounded memory access",
4048 .errstr_unpriv
= "R0 leaks addr",
4049 .result_unpriv
= REJECT
,
4050 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
4053 "constant register |= constant should keep constant type",
4055 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4056 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4057 BPF_MOV64_IMM(BPF_REG_2
, 34),
4058 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 13),
4059 BPF_MOV64_IMM(BPF_REG_3
, 0),
4060 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4064 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4067 "constant register |= constant should not bypass stack boundary checks",
4069 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4070 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4071 BPF_MOV64_IMM(BPF_REG_2
, 34),
4072 BPF_ALU64_IMM(BPF_OR
, BPF_REG_2
, 24),
4073 BPF_MOV64_IMM(BPF_REG_3
, 0),
4074 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4077 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4079 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4082 "constant register |= constant register should keep constant type",
4084 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4085 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4086 BPF_MOV64_IMM(BPF_REG_2
, 34),
4087 BPF_MOV64_IMM(BPF_REG_4
, 13),
4088 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4089 BPF_MOV64_IMM(BPF_REG_3
, 0),
4090 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4094 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4097 "constant register |= constant register should not bypass stack boundary checks",
4099 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
4100 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -48),
4101 BPF_MOV64_IMM(BPF_REG_2
, 34),
4102 BPF_MOV64_IMM(BPF_REG_4
, 24),
4103 BPF_ALU64_REG(BPF_OR
, BPF_REG_2
, BPF_REG_4
),
4104 BPF_MOV64_IMM(BPF_REG_3
, 0),
4105 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4108 .errstr
= "invalid stack type R1 off=-48 access_size=58",
4110 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4113 "invalid direct packet write for LWT_IN",
4115 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4116 offsetof(struct __sk_buff
, data
)),
4117 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4118 offsetof(struct __sk_buff
, data_end
)),
4119 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4120 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4121 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4122 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4123 BPF_MOV64_IMM(BPF_REG_0
, 0),
4126 .errstr
= "cannot write into packet",
4128 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4131 "invalid direct packet write for LWT_OUT",
4133 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4134 offsetof(struct __sk_buff
, data
)),
4135 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4136 offsetof(struct __sk_buff
, data_end
)),
4137 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4138 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4139 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4140 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4141 BPF_MOV64_IMM(BPF_REG_0
, 0),
4144 .errstr
= "cannot write into packet",
4146 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4149 "direct packet write for LWT_XMIT",
4151 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4152 offsetof(struct __sk_buff
, data
)),
4153 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4154 offsetof(struct __sk_buff
, data_end
)),
4155 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4156 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4157 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4158 BPF_STX_MEM(BPF_B
, BPF_REG_2
, BPF_REG_2
, 0),
4159 BPF_MOV64_IMM(BPF_REG_0
, 0),
4163 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4166 "direct packet read for LWT_IN",
4168 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4169 offsetof(struct __sk_buff
, data
)),
4170 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4171 offsetof(struct __sk_buff
, data_end
)),
4172 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4173 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4174 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4175 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4176 BPF_MOV64_IMM(BPF_REG_0
, 0),
4180 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
4183 "direct packet read for LWT_OUT",
4185 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4186 offsetof(struct __sk_buff
, data
)),
4187 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4188 offsetof(struct __sk_buff
, data_end
)),
4189 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4190 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4191 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4192 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4193 BPF_MOV64_IMM(BPF_REG_0
, 0),
4197 .prog_type
= BPF_PROG_TYPE_LWT_OUT
,
4200 "direct packet read for LWT_XMIT",
4202 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4203 offsetof(struct __sk_buff
, data
)),
4204 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4205 offsetof(struct __sk_buff
, data_end
)),
4206 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4207 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4208 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 1),
4209 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_2
, 0),
4210 BPF_MOV64_IMM(BPF_REG_0
, 0),
4214 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4217 "overlapping checks for direct packet access",
4219 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
4220 offsetof(struct __sk_buff
, data
)),
4221 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
4222 offsetof(struct __sk_buff
, data_end
)),
4223 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
4224 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 8),
4225 BPF_JMP_REG(BPF_JGT
, BPF_REG_0
, BPF_REG_3
, 4),
4226 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
4227 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 6),
4228 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
4229 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_2
, 6),
4230 BPF_MOV64_IMM(BPF_REG_0
, 0),
4234 .prog_type
= BPF_PROG_TYPE_LWT_XMIT
,
4237 "invalid access of tc_classid for LWT_IN",
4239 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4240 offsetof(struct __sk_buff
, tc_classid
)),
4244 .errstr
= "invalid bpf_context access",
4247 "invalid access of tc_classid for LWT_OUT",
4249 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4250 offsetof(struct __sk_buff
, tc_classid
)),
4254 .errstr
= "invalid bpf_context access",
4257 "invalid access of tc_classid for LWT_XMIT",
4259 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
4260 offsetof(struct __sk_buff
, tc_classid
)),
4264 .errstr
= "invalid bpf_context access",
4267 "leak pointer into ctx 1",
4269 BPF_MOV64_IMM(BPF_REG_0
, 0),
4270 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4271 offsetof(struct __sk_buff
, cb
[0])),
4272 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4273 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4274 offsetof(struct __sk_buff
, cb
[0])),
4277 .fixup_map1
= { 2 },
4278 .errstr_unpriv
= "R2 leaks addr into mem",
4279 .result_unpriv
= REJECT
,
4283 "leak pointer into ctx 2",
4285 BPF_MOV64_IMM(BPF_REG_0
, 0),
4286 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
,
4287 offsetof(struct __sk_buff
, cb
[0])),
4288 BPF_STX_XADD(BPF_DW
, BPF_REG_1
, BPF_REG_10
,
4289 offsetof(struct __sk_buff
, cb
[0])),
4292 .errstr_unpriv
= "R10 leaks addr into mem",
4293 .result_unpriv
= REJECT
,
4297 "leak pointer into ctx 3",
4299 BPF_MOV64_IMM(BPF_REG_0
, 0),
4300 BPF_LD_MAP_FD(BPF_REG_2
, 0),
4301 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
,
4302 offsetof(struct __sk_buff
, cb
[0])),
4305 .fixup_map1
= { 1 },
4306 .errstr_unpriv
= "R2 leaks addr into ctx",
4307 .result_unpriv
= REJECT
,
4311 "leak pointer into map val",
4313 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
4314 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
4315 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4316 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4317 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4318 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
4319 BPF_FUNC_map_lookup_elem
),
4320 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 3),
4321 BPF_MOV64_IMM(BPF_REG_3
, 0),
4322 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_3
, 0),
4323 BPF_STX_XADD(BPF_DW
, BPF_REG_0
, BPF_REG_6
, 0),
4324 BPF_MOV64_IMM(BPF_REG_0
, 0),
4327 .fixup_map1
= { 4 },
4328 .errstr_unpriv
= "R6 leaks addr into mem",
4329 .result_unpriv
= REJECT
,
4333 "helper access to map: full range",
4335 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4336 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4337 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4338 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4340 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4341 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4342 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
4343 BPF_MOV64_IMM(BPF_REG_3
, 0),
4344 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4347 .fixup_map2
= { 3 },
4349 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4352 "helper access to map: partial range",
4354 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4355 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4356 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4357 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4358 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4359 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4360 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4361 BPF_MOV64_IMM(BPF_REG_2
, 8),
4362 BPF_MOV64_IMM(BPF_REG_3
, 0),
4363 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4366 .fixup_map2
= { 3 },
4368 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4371 "helper access to map: empty range",
4373 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4374 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4375 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4376 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4377 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4378 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4379 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4380 BPF_MOV64_IMM(BPF_REG_2
, 0),
4381 BPF_MOV64_IMM(BPF_REG_3
, 0),
4382 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4385 .fixup_map2
= { 3 },
4386 .errstr
= "invalid access to map value, value_size=48 off=0 size=0",
4388 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4391 "helper access to map: out-of-bound range",
4393 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4394 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4395 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4396 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4397 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4398 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4399 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4400 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
) + 8),
4401 BPF_MOV64_IMM(BPF_REG_3
, 0),
4402 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4405 .fixup_map2
= { 3 },
4406 .errstr
= "invalid access to map value, value_size=48 off=0 size=56",
4408 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4411 "helper access to map: negative range",
4413 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4414 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4415 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4416 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4417 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4418 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4419 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4420 BPF_MOV64_IMM(BPF_REG_2
, -8),
4421 BPF_MOV64_IMM(BPF_REG_3
, 0),
4422 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4425 .fixup_map2
= { 3 },
4426 .errstr
= "R2 min value is negative",
4428 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4431 "helper access to adjusted map (via const imm): full range",
4433 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4434 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4435 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4436 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4437 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4438 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4439 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4440 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4441 offsetof(struct test_val
, foo
)),
4442 BPF_MOV64_IMM(BPF_REG_2
,
4443 sizeof(struct test_val
) -
4444 offsetof(struct test_val
, foo
)),
4445 BPF_MOV64_IMM(BPF_REG_3
, 0),
4446 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4449 .fixup_map2
= { 3 },
4451 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4454 "helper access to adjusted map (via const imm): partial range",
4456 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4457 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4458 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4459 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4460 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4461 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4462 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4463 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4464 offsetof(struct test_val
, foo
)),
4465 BPF_MOV64_IMM(BPF_REG_2
, 8),
4466 BPF_MOV64_IMM(BPF_REG_3
, 0),
4467 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4470 .fixup_map2
= { 3 },
4472 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4475 "helper access to adjusted map (via const imm): empty range",
4477 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4478 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4479 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4480 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4481 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4482 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4483 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4484 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4485 offsetof(struct test_val
, foo
)),
4486 BPF_MOV64_IMM(BPF_REG_2
, 0),
4487 BPF_MOV64_IMM(BPF_REG_3
, 0),
4488 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4491 .fixup_map2
= { 3 },
4492 .errstr
= "invalid access to map value, value_size=48 off=4 size=0",
4494 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4497 "helper access to adjusted map (via const imm): out-of-bound range",
4499 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4500 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4501 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4502 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4503 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4504 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4505 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4506 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4507 offsetof(struct test_val
, foo
)),
4508 BPF_MOV64_IMM(BPF_REG_2
,
4509 sizeof(struct test_val
) -
4510 offsetof(struct test_val
, foo
) + 8),
4511 BPF_MOV64_IMM(BPF_REG_3
, 0),
4512 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4515 .fixup_map2
= { 3 },
4516 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4518 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4521 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4523 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4524 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4525 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4526 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4527 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4528 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4529 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4530 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4531 offsetof(struct test_val
, foo
)),
4532 BPF_MOV64_IMM(BPF_REG_2
, -8),
4533 BPF_MOV64_IMM(BPF_REG_3
, 0),
4534 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4537 .fixup_map2
= { 3 },
4538 .errstr
= "R2 min value is negative",
4540 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4543 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4545 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4546 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4547 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4548 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4549 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4550 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
4551 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4552 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
4553 offsetof(struct test_val
, foo
)),
4554 BPF_MOV64_IMM(BPF_REG_2
, -1),
4555 BPF_MOV64_IMM(BPF_REG_3
, 0),
4556 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4559 .fixup_map2
= { 3 },
4560 .errstr
= "R2 min value is negative",
4562 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4565 "helper access to adjusted map (via const reg): full range",
4567 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4569 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4570 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4571 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4572 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4573 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4574 BPF_MOV64_IMM(BPF_REG_3
,
4575 offsetof(struct test_val
, foo
)),
4576 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4577 BPF_MOV64_IMM(BPF_REG_2
,
4578 sizeof(struct test_val
) -
4579 offsetof(struct test_val
, foo
)),
4580 BPF_MOV64_IMM(BPF_REG_3
, 0),
4581 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4584 .fixup_map2
= { 3 },
4586 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4589 "helper access to adjusted map (via const reg): partial range",
4591 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4592 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4593 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4594 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4595 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4596 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4597 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4598 BPF_MOV64_IMM(BPF_REG_3
,
4599 offsetof(struct test_val
, foo
)),
4600 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4601 BPF_MOV64_IMM(BPF_REG_2
, 8),
4602 BPF_MOV64_IMM(BPF_REG_3
, 0),
4603 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4606 .fixup_map2
= { 3 },
4608 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4611 "helper access to adjusted map (via const reg): empty range",
4613 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4614 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4615 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4616 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4618 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4619 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4620 BPF_MOV64_IMM(BPF_REG_3
, 0),
4621 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4622 BPF_MOV64_IMM(BPF_REG_2
, 0),
4623 BPF_MOV64_IMM(BPF_REG_3
, 0),
4624 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4627 .fixup_map2
= { 3 },
4628 .errstr
= "R1 min value is outside of the array range",
4630 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4633 "helper access to adjusted map (via const reg): out-of-bound range",
4635 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4636 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4637 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4638 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4639 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4640 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4641 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4642 BPF_MOV64_IMM(BPF_REG_3
,
4643 offsetof(struct test_val
, foo
)),
4644 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4645 BPF_MOV64_IMM(BPF_REG_2
,
4646 sizeof(struct test_val
) -
4647 offsetof(struct test_val
, foo
) + 8),
4648 BPF_MOV64_IMM(BPF_REG_3
, 0),
4649 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4652 .fixup_map2
= { 3 },
4653 .errstr
= "invalid access to map value, value_size=48 off=4 size=52",
4655 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4658 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4660 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4661 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4662 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4663 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4664 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4665 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4666 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4667 BPF_MOV64_IMM(BPF_REG_3
,
4668 offsetof(struct test_val
, foo
)),
4669 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4670 BPF_MOV64_IMM(BPF_REG_2
, -8),
4671 BPF_MOV64_IMM(BPF_REG_3
, 0),
4672 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4675 .fixup_map2
= { 3 },
4676 .errstr
= "R2 min value is negative",
4678 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4681 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4683 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4684 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4685 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4686 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4688 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4689 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4690 BPF_MOV64_IMM(BPF_REG_3
,
4691 offsetof(struct test_val
, foo
)),
4692 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4693 BPF_MOV64_IMM(BPF_REG_2
, -1),
4694 BPF_MOV64_IMM(BPF_REG_3
, 0),
4695 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4698 .fixup_map2
= { 3 },
4699 .errstr
= "R2 min value is negative",
4701 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4704 "helper access to adjusted map (via variable): full range",
4706 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4707 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4708 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4709 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4710 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4711 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4712 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4713 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4714 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4715 offsetof(struct test_val
, foo
), 4),
4716 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4717 BPF_MOV64_IMM(BPF_REG_2
,
4718 sizeof(struct test_val
) -
4719 offsetof(struct test_val
, foo
)),
4720 BPF_MOV64_IMM(BPF_REG_3
, 0),
4721 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4724 .fixup_map2
= { 3 },
4726 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4729 "helper access to adjusted map (via variable): partial range",
4731 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4732 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4733 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4734 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4735 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4736 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4737 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4738 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4739 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4740 offsetof(struct test_val
, foo
), 4),
4741 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4742 BPF_MOV64_IMM(BPF_REG_2
, 8),
4743 BPF_MOV64_IMM(BPF_REG_3
, 0),
4744 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4747 .fixup_map2
= { 3 },
4749 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4752 "helper access to adjusted map (via variable): empty range",
4754 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4755 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4756 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4757 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4758 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4759 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4760 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4761 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4762 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4763 offsetof(struct test_val
, foo
), 4),
4764 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4765 BPF_MOV64_IMM(BPF_REG_2
, 0),
4766 BPF_MOV64_IMM(BPF_REG_3
, 0),
4767 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4770 .fixup_map2
= { 3 },
4771 .errstr
= "R1 min value is outside of the array range",
4773 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4776 "helper access to adjusted map (via variable): no max check",
4778 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4779 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4780 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4781 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4782 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4783 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
4784 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4785 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4786 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4787 BPF_MOV64_IMM(BPF_REG_2
, 1),
4788 BPF_MOV64_IMM(BPF_REG_3
, 0),
4789 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4792 .fixup_map2
= { 3 },
4793 .errstr
= "R1 unbounded memory access",
4795 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4798 "helper access to adjusted map (via variable): wrong max check",
4800 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4801 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4802 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4803 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4804 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4805 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
4806 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4807 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4808 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
,
4809 offsetof(struct test_val
, foo
), 4),
4810 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4811 BPF_MOV64_IMM(BPF_REG_2
,
4812 sizeof(struct test_val
) -
4813 offsetof(struct test_val
, foo
) + 1),
4814 BPF_MOV64_IMM(BPF_REG_3
, 0),
4815 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
4818 .fixup_map2
= { 3 },
4819 .errstr
= "invalid access to map value, value_size=48 off=4 size=45",
4821 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4824 "helper access to map: bounds check using <, good access",
4826 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4827 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4828 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4829 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4830 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4831 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4832 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4833 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4834 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 2),
4835 BPF_MOV64_IMM(BPF_REG_0
, 0),
4837 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4838 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4839 BPF_MOV64_IMM(BPF_REG_0
, 0),
4842 .fixup_map2
= { 3 },
4844 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4847 "helper access to map: bounds check using <, bad access",
4849 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4850 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4851 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4852 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4853 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4854 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4855 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4856 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4857 BPF_JMP_IMM(BPF_JLT
, BPF_REG_3
, 32, 4),
4858 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4859 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4860 BPF_MOV64_IMM(BPF_REG_0
, 0),
4862 BPF_MOV64_IMM(BPF_REG_0
, 0),
4865 .fixup_map2
= { 3 },
4867 .errstr
= "R1 unbounded memory access",
4868 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4871 "helper access to map: bounds check using <=, good access",
4873 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4874 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4875 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4876 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4877 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4878 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4879 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4880 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4881 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 2),
4882 BPF_MOV64_IMM(BPF_REG_0
, 0),
4884 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4885 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4886 BPF_MOV64_IMM(BPF_REG_0
, 0),
4889 .fixup_map2
= { 3 },
4891 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4894 "helper access to map: bounds check using <=, bad access",
4896 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4897 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4898 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4899 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4900 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4901 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4902 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4903 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4904 BPF_JMP_IMM(BPF_JLE
, BPF_REG_3
, 32, 4),
4905 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4906 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4907 BPF_MOV64_IMM(BPF_REG_0
, 0),
4909 BPF_MOV64_IMM(BPF_REG_0
, 0),
4912 .fixup_map2
= { 3 },
4914 .errstr
= "R1 unbounded memory access",
4915 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4918 "helper access to map: bounds check using s<, good access",
4920 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4921 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4922 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4923 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4925 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4926 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4927 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4928 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4929 BPF_MOV64_IMM(BPF_REG_0
, 0),
4931 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 0, -3),
4932 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4933 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4934 BPF_MOV64_IMM(BPF_REG_0
, 0),
4937 .fixup_map2
= { 3 },
4939 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4942 "helper access to map: bounds check using s<, good access 2",
4944 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4945 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4946 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4947 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4948 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4949 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4950 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4951 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
4952 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4953 BPF_MOV64_IMM(BPF_REG_0
, 0),
4955 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
4956 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4957 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4958 BPF_MOV64_IMM(BPF_REG_0
, 0),
4961 .fixup_map2
= { 3 },
4963 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4966 "helper access to map: bounds check using s<, bad access",
4968 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4969 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4970 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4971 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4972 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4973 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4974 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
4975 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
4976 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, 32, 2),
4977 BPF_MOV64_IMM(BPF_REG_0
, 0),
4979 BPF_JMP_IMM(BPF_JSLT
, BPF_REG_3
, -3, -3),
4980 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
4981 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
4982 BPF_MOV64_IMM(BPF_REG_0
, 0),
4985 .fixup_map2
= { 3 },
4987 .errstr
= "R1 min value is negative",
4988 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
4991 "helper access to map: bounds check using s<=, good access",
4993 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
4994 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
4995 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
4996 BPF_LD_MAP_FD(BPF_REG_1
, 0),
4997 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
4998 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
4999 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5000 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5001 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5002 BPF_MOV64_IMM(BPF_REG_0
, 0),
5004 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 0, -3),
5005 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5006 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5007 BPF_MOV64_IMM(BPF_REG_0
, 0),
5010 .fixup_map2
= { 3 },
5012 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5015 "helper access to map: bounds check using s<=, good access 2",
5017 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5018 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5019 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5020 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5021 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5022 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5023 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5024 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_0
, 0),
5025 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5026 BPF_MOV64_IMM(BPF_REG_0
, 0),
5028 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5029 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5030 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5031 BPF_MOV64_IMM(BPF_REG_0
, 0),
5034 .fixup_map2
= { 3 },
5036 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5039 "helper access to map: bounds check using s<=, bad access",
5041 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5042 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5043 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5044 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5045 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5046 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5047 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5048 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_0
, 0),
5049 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, 32, 2),
5050 BPF_MOV64_IMM(BPF_REG_0
, 0),
5052 BPF_JMP_IMM(BPF_JSLE
, BPF_REG_3
, -3, -3),
5053 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_3
),
5054 BPF_ST_MEM(BPF_B
, BPF_REG_1
, 0, 0),
5055 BPF_MOV64_IMM(BPF_REG_0
, 0),
5058 .fixup_map2
= { 3 },
5060 .errstr
= "R1 min value is negative",
5061 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5064 "map element value is preserved across register spilling",
5066 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5067 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5068 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5069 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5070 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5071 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5072 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5073 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5074 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5075 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5076 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5077 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5080 .fixup_map2
= { 3 },
5081 .errstr_unpriv
= "R0 leaks addr",
5083 .result_unpriv
= REJECT
,
5086 "map element value or null is marked on register spilling",
5088 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5089 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5090 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5091 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5092 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5093 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5094 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -152),
5095 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5096 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5097 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5098 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5101 .fixup_map2
= { 3 },
5102 .errstr_unpriv
= "R0 leaks addr",
5104 .result_unpriv
= REJECT
,
5107 "map element value store of cleared call register",
5109 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5110 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5111 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5112 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5113 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5114 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 1),
5115 BPF_STX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, 0),
5118 .fixup_map2
= { 3 },
5119 .errstr_unpriv
= "R1 !read_ok",
5120 .errstr
= "R1 !read_ok",
5122 .result_unpriv
= REJECT
,
5125 "map element value with unaligned store",
5127 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5128 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5129 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5130 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5131 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5132 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 17),
5133 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5134 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5135 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 2, 43),
5136 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, -2, 44),
5137 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5138 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 32),
5139 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 2, 33),
5140 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -2, 34),
5141 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_8
, 5),
5142 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 0, 22),
5143 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, 4, 23),
5144 BPF_ST_MEM(BPF_DW
, BPF_REG_8
, -7, 24),
5145 BPF_MOV64_REG(BPF_REG_7
, BPF_REG_8
),
5146 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_7
, 3),
5147 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 0, 22),
5148 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, 4, 23),
5149 BPF_ST_MEM(BPF_DW
, BPF_REG_7
, -4, 24),
5152 .fixup_map2
= { 3 },
5153 .errstr_unpriv
= "R0 leaks addr",
5155 .result_unpriv
= REJECT
,
5156 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5159 "map element value with unaligned load",
5161 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5162 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5163 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5164 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5165 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5166 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5167 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5168 BPF_JMP_IMM(BPF_JGE
, BPF_REG_1
, MAX_ENTRIES
, 9),
5169 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 3),
5170 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5171 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 2),
5172 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_0
),
5173 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 0),
5174 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_8
, 2),
5175 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 5),
5176 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 0),
5177 BPF_LDX_MEM(BPF_DW
, BPF_REG_7
, BPF_REG_0
, 4),
5180 .fixup_map2
= { 3 },
5181 .errstr_unpriv
= "R0 leaks addr",
5183 .result_unpriv
= REJECT
,
5184 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5187 "map element value illegal alu op, 1",
5189 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5190 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5191 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5192 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5193 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5194 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5195 BPF_ALU64_IMM(BPF_AND
, BPF_REG_0
, 8),
5196 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5199 .fixup_map2
= { 3 },
5200 .errstr_unpriv
= "R0 bitwise operator &= on pointer",
5201 .errstr
= "invalid mem access 'inv'",
5203 .result_unpriv
= REJECT
,
5206 "map element value illegal alu op, 2",
5208 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5209 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5210 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5211 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5212 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5213 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5214 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_0
, 0),
5215 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5218 .fixup_map2
= { 3 },
5219 .errstr_unpriv
= "R0 32-bit pointer arithmetic prohibited",
5220 .errstr
= "invalid mem access 'inv'",
5222 .result_unpriv
= REJECT
,
5225 "map element value illegal alu op, 3",
5227 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5228 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5229 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5230 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5231 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5232 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5233 BPF_ALU64_IMM(BPF_DIV
, BPF_REG_0
, 42),
5234 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5237 .fixup_map2
= { 3 },
5238 .errstr_unpriv
= "R0 pointer arithmetic with /= operator",
5239 .errstr
= "invalid mem access 'inv'",
5241 .result_unpriv
= REJECT
,
5244 "map element value illegal alu op, 4",
5246 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5247 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5248 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5249 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5250 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5251 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
5252 BPF_ENDIAN(BPF_FROM_BE
, BPF_REG_0
, 64),
5253 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5256 .fixup_map2
= { 3 },
5257 .errstr_unpriv
= "R0 pointer arithmetic prohibited",
5258 .errstr
= "invalid mem access 'inv'",
5260 .result_unpriv
= REJECT
,
5263 "map element value illegal alu op, 5",
5265 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5266 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5267 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5268 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5269 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5270 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5271 BPF_MOV64_IMM(BPF_REG_3
, 4096),
5272 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5273 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5274 BPF_STX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_0
, 0),
5275 BPF_STX_XADD(BPF_DW
, BPF_REG_2
, BPF_REG_3
, 0),
5276 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_2
, 0),
5277 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 22),
5280 .fixup_map2
= { 3 },
5281 .errstr
= "R0 invalid mem access 'inv'",
5285 "map element value is preserved across register spilling",
5287 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5288 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5289 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5290 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5291 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5292 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
5293 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
,
5294 offsetof(struct test_val
, foo
)),
5295 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0, 42),
5296 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5297 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -184),
5298 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
5299 BPF_LDX_MEM(BPF_DW
, BPF_REG_3
, BPF_REG_1
, 0),
5300 BPF_ST_MEM(BPF_DW
, BPF_REG_3
, 0, 42),
5303 .fixup_map2
= { 3 },
5304 .errstr_unpriv
= "R0 leaks addr",
5306 .result_unpriv
= REJECT
,
5307 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5310 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5312 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5313 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5314 BPF_MOV64_IMM(BPF_REG_0
, 0),
5315 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5316 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5317 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5318 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5319 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5320 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5321 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5322 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5323 BPF_MOV64_IMM(BPF_REG_2
, 16),
5324 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5325 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5326 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5327 BPF_MOV64_IMM(BPF_REG_4
, 0),
5328 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5329 BPF_MOV64_IMM(BPF_REG_3
, 0),
5330 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5331 BPF_MOV64_IMM(BPF_REG_0
, 0),
5335 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5338 "helper access to variable memory: stack, bitwise AND, zero included",
5340 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5341 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5342 BPF_MOV64_IMM(BPF_REG_2
, 16),
5343 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5344 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5345 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5346 BPF_MOV64_IMM(BPF_REG_3
, 0),
5347 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5350 .errstr
= "invalid stack type R1 off=-64 access_size=0",
5352 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5355 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5357 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5358 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5359 BPF_MOV64_IMM(BPF_REG_2
, 16),
5360 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5361 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5362 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 65),
5363 BPF_MOV64_IMM(BPF_REG_4
, 0),
5364 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5365 BPF_MOV64_IMM(BPF_REG_3
, 0),
5366 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5367 BPF_MOV64_IMM(BPF_REG_0
, 0),
5370 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5372 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5375 "helper access to variable memory: stack, JMP, correct bounds",
5377 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5378 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5379 BPF_MOV64_IMM(BPF_REG_0
, 0),
5380 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5381 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5382 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5383 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5384 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5385 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5386 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5387 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5388 BPF_MOV64_IMM(BPF_REG_2
, 16),
5389 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5390 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5391 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 4),
5392 BPF_MOV64_IMM(BPF_REG_4
, 0),
5393 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5394 BPF_MOV64_IMM(BPF_REG_3
, 0),
5395 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5396 BPF_MOV64_IMM(BPF_REG_0
, 0),
5400 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5403 "helper access to variable memory: stack, JMP (signed), correct bounds",
5405 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5406 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5407 BPF_MOV64_IMM(BPF_REG_0
, 0),
5408 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5409 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5410 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5411 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5412 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5413 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5414 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5415 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5416 BPF_MOV64_IMM(BPF_REG_2
, 16),
5417 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5418 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5419 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 4),
5420 BPF_MOV64_IMM(BPF_REG_4
, 0),
5421 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5422 BPF_MOV64_IMM(BPF_REG_3
, 0),
5423 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5424 BPF_MOV64_IMM(BPF_REG_0
, 0),
5428 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5431 "helper access to variable memory: stack, JMP, bounds + offset",
5433 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5434 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5435 BPF_MOV64_IMM(BPF_REG_2
, 16),
5436 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5437 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5438 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 5),
5439 BPF_MOV64_IMM(BPF_REG_4
, 0),
5440 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 3),
5441 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5442 BPF_MOV64_IMM(BPF_REG_3
, 0),
5443 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5444 BPF_MOV64_IMM(BPF_REG_0
, 0),
5447 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5449 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5452 "helper access to variable memory: stack, JMP, wrong max",
5454 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5455 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5456 BPF_MOV64_IMM(BPF_REG_2
, 16),
5457 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5458 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5459 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 65, 4),
5460 BPF_MOV64_IMM(BPF_REG_4
, 0),
5461 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5462 BPF_MOV64_IMM(BPF_REG_3
, 0),
5463 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5464 BPF_MOV64_IMM(BPF_REG_0
, 0),
5467 .errstr
= "invalid stack type R1 off=-64 access_size=65",
5469 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5472 "helper access to variable memory: stack, JMP, no max check",
5474 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5475 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5476 BPF_MOV64_IMM(BPF_REG_2
, 16),
5477 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5478 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5479 BPF_MOV64_IMM(BPF_REG_4
, 0),
5480 BPF_JMP_REG(BPF_JGE
, BPF_REG_4
, BPF_REG_2
, 2),
5481 BPF_MOV64_IMM(BPF_REG_3
, 0),
5482 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5483 BPF_MOV64_IMM(BPF_REG_0
, 0),
5486 /* because max wasn't checked, signed min is negative */
5487 .errstr
= "R2 min value is negative, either use unsigned or 'var &= const'",
5489 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5492 "helper access to variable memory: stack, JMP, no min check",
5494 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5495 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5496 BPF_MOV64_IMM(BPF_REG_2
, 16),
5497 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5498 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5499 BPF_JMP_IMM(BPF_JGT
, BPF_REG_2
, 64, 3),
5500 BPF_MOV64_IMM(BPF_REG_3
, 0),
5501 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5502 BPF_MOV64_IMM(BPF_REG_0
, 0),
5505 .errstr
= "invalid stack type R1 off=-64 access_size=0",
5507 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5510 "helper access to variable memory: stack, JMP (signed), no min check",
5512 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5513 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5514 BPF_MOV64_IMM(BPF_REG_2
, 16),
5515 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, -128),
5516 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_1
, -128),
5517 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
, 64, 3),
5518 BPF_MOV64_IMM(BPF_REG_3
, 0),
5519 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5520 BPF_MOV64_IMM(BPF_REG_0
, 0),
5523 .errstr
= "R2 min value is negative",
5525 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5528 "helper access to variable memory: map, JMP, correct bounds",
5530 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5531 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5532 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5533 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5534 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5535 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5536 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5537 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5538 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5539 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5540 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5541 sizeof(struct test_val
), 4),
5542 BPF_MOV64_IMM(BPF_REG_4
, 0),
5543 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5544 BPF_MOV64_IMM(BPF_REG_3
, 0),
5545 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5546 BPF_MOV64_IMM(BPF_REG_0
, 0),
5549 .fixup_map2
= { 3 },
5551 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5554 "helper access to variable memory: map, JMP, wrong max",
5556 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5557 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5558 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5559 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5560 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5561 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
5562 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5563 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5564 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5565 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5566 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5567 sizeof(struct test_val
) + 1, 4),
5568 BPF_MOV64_IMM(BPF_REG_4
, 0),
5569 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5570 BPF_MOV64_IMM(BPF_REG_3
, 0),
5571 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5572 BPF_MOV64_IMM(BPF_REG_0
, 0),
5575 .fixup_map2
= { 3 },
5576 .errstr
= "invalid access to map value, value_size=48 off=0 size=49",
5578 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5581 "helper access to variable memory: map adjusted, JMP, correct bounds",
5583 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5584 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5585 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5586 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5587 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5588 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5589 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5590 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5591 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5592 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5593 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5594 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5595 sizeof(struct test_val
) - 20, 4),
5596 BPF_MOV64_IMM(BPF_REG_4
, 0),
5597 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5598 BPF_MOV64_IMM(BPF_REG_3
, 0),
5599 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5600 BPF_MOV64_IMM(BPF_REG_0
, 0),
5603 .fixup_map2
= { 3 },
5605 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5608 "helper access to variable memory: map adjusted, JMP, wrong max",
5610 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5611 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5612 BPF_ST_MEM(BPF_DW
, BPF_REG_2
, 0, 0),
5613 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5614 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem
),
5615 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 11),
5616 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5617 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 20),
5618 BPF_MOV64_IMM(BPF_REG_2
, sizeof(struct test_val
)),
5619 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5620 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5621 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_2
,
5622 sizeof(struct test_val
) - 19, 4),
5623 BPF_MOV64_IMM(BPF_REG_4
, 0),
5624 BPF_JMP_REG(BPF_JSGE
, BPF_REG_4
, BPF_REG_2
, 2),
5625 BPF_MOV64_IMM(BPF_REG_3
, 0),
5626 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5627 BPF_MOV64_IMM(BPF_REG_0
, 0),
5630 .fixup_map2
= { 3 },
5631 .errstr
= "R1 min value is outside of the array range",
5633 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5636 "helper access to variable memory: size = 0 allowed on NULL",
5638 BPF_MOV64_IMM(BPF_REG_1
, 0),
5639 BPF_MOV64_IMM(BPF_REG_2
, 0),
5640 BPF_MOV64_IMM(BPF_REG_3
, 0),
5641 BPF_MOV64_IMM(BPF_REG_4
, 0),
5642 BPF_MOV64_IMM(BPF_REG_5
, 0),
5643 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5647 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5650 "helper access to variable memory: size > 0 not allowed on NULL",
5652 BPF_MOV64_IMM(BPF_REG_1
, 0),
5653 BPF_MOV64_IMM(BPF_REG_2
, 0),
5654 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5655 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5656 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 64),
5657 BPF_MOV64_IMM(BPF_REG_3
, 0),
5658 BPF_MOV64_IMM(BPF_REG_4
, 0),
5659 BPF_MOV64_IMM(BPF_REG_5
, 0),
5660 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5663 .errstr
= "R1 type=inv expected=fp",
5665 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5668 "helper access to variable memory: size = 0 not allowed on != NULL",
5670 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5671 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -8),
5672 BPF_MOV64_IMM(BPF_REG_2
, 0),
5673 BPF_STX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_2
, 0),
5674 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 8),
5675 BPF_MOV64_IMM(BPF_REG_3
, 0),
5676 BPF_MOV64_IMM(BPF_REG_4
, 0),
5677 BPF_MOV64_IMM(BPF_REG_5
, 0),
5678 BPF_EMIT_CALL(BPF_FUNC_csum_diff
),
5681 .errstr
= "invalid stack type R1 off=-8 access_size=0",
5683 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
5686 "helper access to variable memory: 8 bytes leak",
5688 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5689 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5690 BPF_MOV64_IMM(BPF_REG_0
, 0),
5691 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5692 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5693 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5694 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5695 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5696 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5697 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5698 BPF_MOV64_IMM(BPF_REG_2
, 0),
5699 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_2
, -128),
5700 BPF_LDX_MEM(BPF_DW
, BPF_REG_2
, BPF_REG_10
, -128),
5701 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 63),
5702 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 1),
5703 BPF_MOV64_IMM(BPF_REG_3
, 0),
5704 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5705 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5708 .errstr
= "invalid indirect read from stack off -64+32 size 64",
5710 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5713 "helper access to variable memory: 8 bytes no leak (init memory)",
5715 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
5716 BPF_MOV64_IMM(BPF_REG_0
, 0),
5717 BPF_MOV64_IMM(BPF_REG_0
, 0),
5718 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -64),
5719 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -56),
5720 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -48),
5721 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -40),
5722 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -32),
5723 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -24),
5724 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -16),
5725 BPF_STX_MEM(BPF_DW
, BPF_REG_10
, BPF_REG_0
, -8),
5726 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, -64),
5727 BPF_MOV64_IMM(BPF_REG_2
, 0),
5728 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 32),
5729 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, 32),
5730 BPF_MOV64_IMM(BPF_REG_3
, 0),
5731 BPF_EMIT_CALL(BPF_FUNC_probe_read
),
5732 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
5736 .prog_type
= BPF_PROG_TYPE_TRACEPOINT
,
5739 "invalid and of negative number",
5741 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5742 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5743 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5744 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5745 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5746 BPF_FUNC_map_lookup_elem
),
5747 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
5748 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
5749 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, -4),
5750 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_1
, 2),
5751 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
5752 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
5753 offsetof(struct test_val
, foo
)),
5756 .fixup_map2
= { 3 },
5757 .errstr
= "R0 max value is outside of the array range",
5759 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5762 "invalid range check",
5764 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
5765 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5766 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
5767 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5768 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5769 BPF_FUNC_map_lookup_elem
),
5770 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 12),
5771 BPF_LDX_MEM(BPF_W
, BPF_REG_1
, BPF_REG_0
, 0),
5772 BPF_MOV64_IMM(BPF_REG_9
, 1),
5773 BPF_ALU32_IMM(BPF_MOD
, BPF_REG_1
, 2),
5774 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 1),
5775 BPF_ALU32_REG(BPF_AND
, BPF_REG_9
, BPF_REG_1
),
5776 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_9
, 1),
5777 BPF_ALU32_IMM(BPF_RSH
, BPF_REG_9
, 1),
5778 BPF_MOV32_IMM(BPF_REG_3
, 1),
5779 BPF_ALU32_REG(BPF_SUB
, BPF_REG_3
, BPF_REG_9
),
5780 BPF_ALU32_IMM(BPF_MUL
, BPF_REG_3
, 0x10000000),
5781 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_3
),
5782 BPF_STX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_3
, 0),
5783 BPF_MOV64_REG(BPF_REG_0
, 0),
5786 .fixup_map2
= { 3 },
5787 .errstr
= "R0 max value is outside of the array range",
5789 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
5792 "map in map access",
5794 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5795 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5796 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5797 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5798 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5799 BPF_FUNC_map_lookup_elem
),
5800 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
5801 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5802 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5803 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5804 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5805 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5806 BPF_FUNC_map_lookup_elem
),
5807 BPF_MOV64_REG(BPF_REG_0
, 0),
5810 .fixup_map_in_map
= { 3 },
5814 "invalid inner map pointer",
5816 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5817 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5818 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5819 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5820 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5821 BPF_FUNC_map_lookup_elem
),
5822 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
5823 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5824 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5825 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5826 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5827 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
5828 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5829 BPF_FUNC_map_lookup_elem
),
5830 BPF_MOV64_REG(BPF_REG_0
, 0),
5833 .fixup_map_in_map
= { 3 },
5834 .errstr
= "R1 type=inv expected=map_ptr",
5835 .errstr_unpriv
= "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5839 "forgot null checking on the inner map pointer",
5841 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5842 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5843 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5844 BPF_LD_MAP_FD(BPF_REG_1
, 0),
5845 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5846 BPF_FUNC_map_lookup_elem
),
5847 BPF_ST_MEM(0, BPF_REG_10
, -4, 0),
5848 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
5849 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -4),
5850 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_0
),
5851 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
5852 BPF_FUNC_map_lookup_elem
),
5853 BPF_MOV64_REG(BPF_REG_0
, 0),
5856 .fixup_map_in_map
= { 3 },
5857 .errstr
= "R1 type=map_value_or_null expected=map_ptr",
5861 "ld_abs: check calling conv, r1",
5863 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5864 BPF_MOV64_IMM(BPF_REG_1
, 0),
5865 BPF_LD_ABS(BPF_W
, -0x200000),
5866 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
5869 .errstr
= "R1 !read_ok",
5873 "ld_abs: check calling conv, r2",
5875 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5876 BPF_MOV64_IMM(BPF_REG_2
, 0),
5877 BPF_LD_ABS(BPF_W
, -0x200000),
5878 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
5881 .errstr
= "R2 !read_ok",
5885 "ld_abs: check calling conv, r3",
5887 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5888 BPF_MOV64_IMM(BPF_REG_3
, 0),
5889 BPF_LD_ABS(BPF_W
, -0x200000),
5890 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
5893 .errstr
= "R3 !read_ok",
5897 "ld_abs: check calling conv, r4",
5899 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5900 BPF_MOV64_IMM(BPF_REG_4
, 0),
5901 BPF_LD_ABS(BPF_W
, -0x200000),
5902 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
5905 .errstr
= "R4 !read_ok",
5909 "ld_abs: check calling conv, r5",
5911 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5912 BPF_MOV64_IMM(BPF_REG_5
, 0),
5913 BPF_LD_ABS(BPF_W
, -0x200000),
5914 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
5917 .errstr
= "R5 !read_ok",
5921 "ld_abs: check calling conv, r7",
5923 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5924 BPF_MOV64_IMM(BPF_REG_7
, 0),
5925 BPF_LD_ABS(BPF_W
, -0x200000),
5926 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
5932 "ld_ind: check calling conv, r1",
5934 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5935 BPF_MOV64_IMM(BPF_REG_1
, 1),
5936 BPF_LD_IND(BPF_W
, BPF_REG_1
, -0x200000),
5937 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_1
),
5940 .errstr
= "R1 !read_ok",
5944 "ld_ind: check calling conv, r2",
5946 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5947 BPF_MOV64_IMM(BPF_REG_2
, 1),
5948 BPF_LD_IND(BPF_W
, BPF_REG_2
, -0x200000),
5949 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_2
),
5952 .errstr
= "R2 !read_ok",
5956 "ld_ind: check calling conv, r3",
5958 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5959 BPF_MOV64_IMM(BPF_REG_3
, 1),
5960 BPF_LD_IND(BPF_W
, BPF_REG_3
, -0x200000),
5961 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_3
),
5964 .errstr
= "R3 !read_ok",
5968 "ld_ind: check calling conv, r4",
5970 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5971 BPF_MOV64_IMM(BPF_REG_4
, 1),
5972 BPF_LD_IND(BPF_W
, BPF_REG_4
, -0x200000),
5973 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_4
),
5976 .errstr
= "R4 !read_ok",
5980 "ld_ind: check calling conv, r5",
5982 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5983 BPF_MOV64_IMM(BPF_REG_5
, 1),
5984 BPF_LD_IND(BPF_W
, BPF_REG_5
, -0x200000),
5985 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_5
),
5988 .errstr
= "R5 !read_ok",
5992 "ld_ind: check calling conv, r7",
5994 BPF_MOV64_REG(BPF_REG_6
, BPF_REG_1
),
5995 BPF_MOV64_IMM(BPF_REG_7
, 1),
5996 BPF_LD_IND(BPF_W
, BPF_REG_7
, -0x200000),
5997 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_7
),
6003 "check bpf_perf_event_data->sample_period byte load permitted",
6005 BPF_MOV64_IMM(BPF_REG_0
, 0),
6006 #if __BYTE_ORDER == __LITTLE_ENDIAN
6007 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6008 offsetof(struct bpf_perf_event_data
, sample_period
)),
6010 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_1
,
6011 offsetof(struct bpf_perf_event_data
, sample_period
) + 7),
6016 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6019 "check bpf_perf_event_data->sample_period half load permitted",
6021 BPF_MOV64_IMM(BPF_REG_0
, 0),
6022 #if __BYTE_ORDER == __LITTLE_ENDIAN
6023 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6024 offsetof(struct bpf_perf_event_data
, sample_period
)),
6026 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6027 offsetof(struct bpf_perf_event_data
, sample_period
) + 6),
6032 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6035 "check bpf_perf_event_data->sample_period word load permitted",
6037 BPF_MOV64_IMM(BPF_REG_0
, 0),
6038 #if __BYTE_ORDER == __LITTLE_ENDIAN
6039 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6040 offsetof(struct bpf_perf_event_data
, sample_period
)),
6042 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
6043 offsetof(struct bpf_perf_event_data
, sample_period
) + 4),
6048 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6051 "check bpf_perf_event_data->sample_period dword load permitted",
6053 BPF_MOV64_IMM(BPF_REG_0
, 0),
6054 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
,
6055 offsetof(struct bpf_perf_event_data
, sample_period
)),
6059 .prog_type
= BPF_PROG_TYPE_PERF_EVENT
,
6062 "check skb->data half load not permitted",
6064 BPF_MOV64_IMM(BPF_REG_0
, 0),
6065 #if __BYTE_ORDER == __LITTLE_ENDIAN
6066 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6067 offsetof(struct __sk_buff
, data
)),
6069 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6070 offsetof(struct __sk_buff
, data
) + 2),
6075 .errstr
= "invalid bpf_context access",
6078 "check skb->tc_classid half load not permitted for lwt prog",
6080 BPF_MOV64_IMM(BPF_REG_0
, 0),
6081 #if __BYTE_ORDER == __LITTLE_ENDIAN
6082 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6083 offsetof(struct __sk_buff
, tc_classid
)),
6085 BPF_LDX_MEM(BPF_H
, BPF_REG_0
, BPF_REG_1
,
6086 offsetof(struct __sk_buff
, tc_classid
) + 2),
6091 .errstr
= "invalid bpf_context access",
6092 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
6095 "bounds checks mixing signed and unsigned, positive bounds",
6097 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6098 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6099 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6100 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6101 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6102 BPF_FUNC_map_lookup_elem
),
6103 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6104 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6105 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6106 BPF_MOV64_IMM(BPF_REG_2
, 2),
6107 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 3),
6108 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 4, 2),
6109 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6110 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6111 BPF_MOV64_IMM(BPF_REG_0
, 0),
6114 .fixup_map1
= { 3 },
6115 .errstr
= "unbounded min value",
6119 "bounds checks mixing signed and unsigned",
6121 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6122 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6123 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6124 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6125 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6126 BPF_FUNC_map_lookup_elem
),
6127 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6128 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6129 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6130 BPF_MOV64_IMM(BPF_REG_2
, -1),
6131 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6132 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6133 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6134 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6135 BPF_MOV64_IMM(BPF_REG_0
, 0),
6138 .fixup_map1
= { 3 },
6139 .errstr
= "unbounded min value",
6143 "bounds checks mixing signed and unsigned, variant 2",
6145 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6146 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6147 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6148 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6149 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6150 BPF_FUNC_map_lookup_elem
),
6151 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6152 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6153 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6154 BPF_MOV64_IMM(BPF_REG_2
, -1),
6155 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6156 BPF_MOV64_IMM(BPF_REG_8
, 0),
6157 BPF_ALU64_REG(BPF_ADD
, BPF_REG_8
, BPF_REG_1
),
6158 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6159 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6160 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6161 BPF_MOV64_IMM(BPF_REG_0
, 0),
6164 .fixup_map1
= { 3 },
6165 .errstr
= "unbounded min value",
6169 "bounds checks mixing signed and unsigned, variant 3",
6171 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6172 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6173 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6174 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6175 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6176 BPF_FUNC_map_lookup_elem
),
6177 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6178 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6179 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6180 BPF_MOV64_IMM(BPF_REG_2
, -1),
6181 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 4),
6182 BPF_MOV64_REG(BPF_REG_8
, BPF_REG_1
),
6183 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_8
, 1, 2),
6184 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_8
),
6185 BPF_ST_MEM(BPF_B
, BPF_REG_8
, 0, 0),
6186 BPF_MOV64_IMM(BPF_REG_0
, 0),
6189 .fixup_map1
= { 3 },
6190 .errstr
= "unbounded min value",
6194 "bounds checks mixing signed and unsigned, variant 4",
6196 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6197 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6198 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6199 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6200 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6201 BPF_FUNC_map_lookup_elem
),
6202 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6203 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6204 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6205 BPF_MOV64_IMM(BPF_REG_2
, 1),
6206 BPF_ALU64_REG(BPF_AND
, BPF_REG_1
, BPF_REG_2
),
6207 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6208 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6209 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6210 BPF_MOV64_IMM(BPF_REG_0
, 0),
6213 .fixup_map1
= { 3 },
6217 "bounds checks mixing signed and unsigned, variant 5",
6219 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6220 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6221 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6222 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6223 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6224 BPF_FUNC_map_lookup_elem
),
6225 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6226 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6227 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6228 BPF_MOV64_IMM(BPF_REG_2
, -1),
6229 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 5),
6230 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 4),
6231 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 4),
6232 BPF_ALU64_REG(BPF_SUB
, BPF_REG_0
, BPF_REG_1
),
6233 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6234 BPF_MOV64_IMM(BPF_REG_0
, 0),
6237 .fixup_map1
= { 3 },
6238 .errstr
= "unbounded min value",
6242 "bounds checks mixing signed and unsigned, variant 6",
6244 BPF_MOV64_IMM(BPF_REG_2
, 0),
6245 BPF_MOV64_REG(BPF_REG_3
, BPF_REG_10
),
6246 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, -512),
6247 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6248 BPF_LDX_MEM(BPF_DW
, BPF_REG_4
, BPF_REG_10
, -16),
6249 BPF_MOV64_IMM(BPF_REG_6
, -1),
6250 BPF_JMP_REG(BPF_JGT
, BPF_REG_4
, BPF_REG_6
, 5),
6251 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_4
, 1, 4),
6252 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_4
, 1),
6253 BPF_MOV64_IMM(BPF_REG_5
, 0),
6254 BPF_ST_MEM(BPF_H
, BPF_REG_10
, -512, 0),
6255 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6256 BPF_FUNC_skb_load_bytes
),
6257 BPF_MOV64_IMM(BPF_REG_0
, 0),
6260 .errstr
= "R4 min value is negative, either use unsigned",
6264 "bounds checks mixing signed and unsigned, variant 7",
6266 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6267 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6268 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6269 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6270 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6271 BPF_FUNC_map_lookup_elem
),
6272 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 7),
6273 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6274 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6275 BPF_MOV64_IMM(BPF_REG_2
, 1024 * 1024 * 1024),
6276 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, 3),
6277 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6278 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6279 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6280 BPF_MOV64_IMM(BPF_REG_0
, 0),
6283 .fixup_map1
= { 3 },
6287 "bounds checks mixing signed and unsigned, variant 8",
6289 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6290 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6291 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6292 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6293 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6294 BPF_FUNC_map_lookup_elem
),
6295 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6296 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6297 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6298 BPF_MOV64_IMM(BPF_REG_2
, -1),
6299 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6300 BPF_MOV64_IMM(BPF_REG_0
, 0),
6302 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6303 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6304 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6305 BPF_MOV64_IMM(BPF_REG_0
, 0),
6308 .fixup_map1
= { 3 },
6309 .errstr
= "unbounded min value",
6313 "bounds checks mixing signed and unsigned, variant 9",
6315 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6316 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6317 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6318 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6319 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6320 BPF_FUNC_map_lookup_elem
),
6321 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 10),
6322 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6323 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6324 BPF_LD_IMM64(BPF_REG_2
, -9223372036854775808ULL),
6325 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6326 BPF_MOV64_IMM(BPF_REG_0
, 0),
6328 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6329 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6330 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6331 BPF_MOV64_IMM(BPF_REG_0
, 0),
6334 .fixup_map1
= { 3 },
6338 "bounds checks mixing signed and unsigned, variant 10",
6340 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6341 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6342 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6343 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6344 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6345 BPF_FUNC_map_lookup_elem
),
6346 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6347 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6348 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6349 BPF_MOV64_IMM(BPF_REG_2
, 0),
6350 BPF_JMP_REG(BPF_JGT
, BPF_REG_2
, BPF_REG_1
, 2),
6351 BPF_MOV64_IMM(BPF_REG_0
, 0),
6353 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6354 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6355 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6356 BPF_MOV64_IMM(BPF_REG_0
, 0),
6359 .fixup_map1
= { 3 },
6360 .errstr
= "unbounded min value",
6364 "bounds checks mixing signed and unsigned, variant 11",
6366 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6367 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6368 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6369 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6370 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6371 BPF_FUNC_map_lookup_elem
),
6372 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6373 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6374 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6375 BPF_MOV64_IMM(BPF_REG_2
, -1),
6376 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6378 BPF_MOV64_IMM(BPF_REG_0
, 0),
6380 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6381 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6382 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6383 BPF_MOV64_IMM(BPF_REG_0
, 0),
6386 .fixup_map1
= { 3 },
6387 .errstr
= "unbounded min value",
6391 "bounds checks mixing signed and unsigned, variant 12",
6393 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6394 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6395 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6396 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6397 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6398 BPF_FUNC_map_lookup_elem
),
6399 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6400 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6401 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6402 BPF_MOV64_IMM(BPF_REG_2
, -6),
6403 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6404 BPF_MOV64_IMM(BPF_REG_0
, 0),
6406 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6407 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6408 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6409 BPF_MOV64_IMM(BPF_REG_0
, 0),
6412 .fixup_map1
= { 3 },
6413 .errstr
= "unbounded min value",
6417 "bounds checks mixing signed and unsigned, variant 13",
6419 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6420 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6421 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6422 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6423 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6424 BPF_FUNC_map_lookup_elem
),
6425 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6426 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6427 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6428 BPF_MOV64_IMM(BPF_REG_2
, 2),
6429 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6430 BPF_MOV64_IMM(BPF_REG_7
, 1),
6431 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 0, 2),
6432 BPF_MOV64_IMM(BPF_REG_0
, 0),
6434 BPF_ALU64_REG(BPF_ADD
, BPF_REG_7
, BPF_REG_1
),
6435 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_7
, 4, 2),
6436 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_7
),
6437 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6438 BPF_MOV64_IMM(BPF_REG_0
, 0),
6441 .fixup_map1
= { 3 },
6442 .errstr
= "unbounded min value",
6446 "bounds checks mixing signed and unsigned, variant 14",
6448 BPF_LDX_MEM(BPF_W
, BPF_REG_9
, BPF_REG_1
,
6449 offsetof(struct __sk_buff
, mark
)),
6450 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6451 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6452 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6453 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6454 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6455 BPF_FUNC_map_lookup_elem
),
6456 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6457 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6458 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6459 BPF_MOV64_IMM(BPF_REG_2
, -1),
6460 BPF_MOV64_IMM(BPF_REG_8
, 2),
6461 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_9
, 42, 6),
6462 BPF_JMP_REG(BPF_JSGT
, BPF_REG_8
, BPF_REG_1
, 3),
6463 BPF_JMP_IMM(BPF_JSGT
, BPF_REG_1
, 1, 2),
6464 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6465 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6466 BPF_MOV64_IMM(BPF_REG_0
, 0),
6468 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_2
, -3),
6469 BPF_JMP_IMM(BPF_JA
, 0, 0, -7),
6471 .fixup_map1
= { 4 },
6472 .errstr
= "unbounded min value",
6476 "bounds checks mixing signed and unsigned, variant 15",
6478 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6479 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6480 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6481 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6482 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6483 BPF_FUNC_map_lookup_elem
),
6484 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6485 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -16, -8),
6486 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_10
, -16),
6487 BPF_MOV64_IMM(BPF_REG_2
, -6),
6488 BPF_JMP_REG(BPF_JGE
, BPF_REG_2
, BPF_REG_1
, 2),
6489 BPF_MOV64_IMM(BPF_REG_0
, 0),
6491 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6492 BPF_JMP_IMM(BPF_JGT
, BPF_REG_0
, 1, 2),
6493 BPF_MOV64_IMM(BPF_REG_0
, 0),
6495 BPF_ST_MEM(BPF_B
, BPF_REG_0
, 0, 0),
6496 BPF_MOV64_IMM(BPF_REG_0
, 0),
6499 .fixup_map1
= { 3 },
6500 .errstr
= "unbounded min value",
6502 .result_unpriv
= REJECT
,
6505 "subtraction bounds (map value) variant 1",
6507 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6508 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6509 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6510 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6511 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6512 BPF_FUNC_map_lookup_elem
),
6513 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6514 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6515 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 7),
6516 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
6517 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 5),
6518 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
6519 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 56),
6520 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6521 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6523 BPF_MOV64_IMM(BPF_REG_0
, 0),
6526 .fixup_map1
= { 3 },
6527 .errstr
= "R0 max value is outside of the array range",
6531 "subtraction bounds (map value) variant 2",
6533 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6534 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6535 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6536 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6537 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6538 BPF_FUNC_map_lookup_elem
),
6539 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
6540 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6541 BPF_JMP_IMM(BPF_JGT
, BPF_REG_1
, 0xff, 6),
6542 BPF_LDX_MEM(BPF_B
, BPF_REG_3
, BPF_REG_0
, 1),
6543 BPF_JMP_IMM(BPF_JGT
, BPF_REG_3
, 0xff, 4),
6544 BPF_ALU64_REG(BPF_SUB
, BPF_REG_1
, BPF_REG_3
),
6545 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6546 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6548 BPF_MOV64_IMM(BPF_REG_0
, 0),
6551 .fixup_map1
= { 3 },
6552 .errstr
= "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6556 "bounds check based on zero-extended MOV",
6558 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6559 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6560 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6561 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6562 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6563 BPF_FUNC_map_lookup_elem
),
6564 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6565 /* r2 = 0x0000'0000'ffff'ffff */
6566 BPF_MOV32_IMM(BPF_REG_2
, 0xffffffff),
6568 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 32),
6570 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
6571 /* access at offset 0 */
6572 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6574 BPF_MOV64_IMM(BPF_REG_0
, 0),
6577 .fixup_map1
= { 3 },
6581 "bounds check based on sign-extended MOV. test1",
6583 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6584 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6585 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6586 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6587 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6588 BPF_FUNC_map_lookup_elem
),
6589 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6590 /* r2 = 0xffff'ffff'ffff'ffff */
6591 BPF_MOV64_IMM(BPF_REG_2
, 0xffffffff),
6592 /* r2 = 0xffff'ffff */
6593 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 32),
6594 /* r0 = <oob pointer> */
6595 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
6596 /* access to OOB pointer */
6597 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6599 BPF_MOV64_IMM(BPF_REG_0
, 0),
6602 .fixup_map1
= { 3 },
6603 .errstr
= "map_value pointer and 4294967295",
6607 "bounds check based on sign-extended MOV. test2",
6609 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6610 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6611 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6612 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6613 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6614 BPF_FUNC_map_lookup_elem
),
6615 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6616 /* r2 = 0xffff'ffff'ffff'ffff */
6617 BPF_MOV64_IMM(BPF_REG_2
, 0xffffffff),
6618 /* r2 = 0xfff'ffff */
6619 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_2
, 36),
6620 /* r0 = <oob pointer> */
6621 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_2
),
6622 /* access to OOB pointer */
6623 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6625 BPF_MOV64_IMM(BPF_REG_0
, 0),
6628 .fixup_map1
= { 3 },
6629 .errstr
= "R0 min value is outside of the array range",
6633 "bounds check based on reg_off + var_off + insn_off. test1",
6635 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
6636 offsetof(struct __sk_buff
, mark
)),
6637 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6638 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6639 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6640 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6641 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6642 BPF_FUNC_map_lookup_elem
),
6643 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6644 BPF_ALU64_IMM(BPF_AND
, BPF_REG_6
, 1),
6645 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, (1 << 29) - 1),
6646 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_6
),
6647 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, (1 << 29) - 1),
6648 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 3),
6649 BPF_MOV64_IMM(BPF_REG_0
, 0),
6652 .fixup_map1
= { 4 },
6653 .errstr
= "value_size=8 off=1073741825",
6655 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6658 "bounds check based on reg_off + var_off + insn_off. test2",
6660 BPF_LDX_MEM(BPF_W
, BPF_REG_6
, BPF_REG_1
,
6661 offsetof(struct __sk_buff
, mark
)),
6662 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6663 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6664 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6665 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6666 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6667 BPF_FUNC_map_lookup_elem
),
6668 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 4),
6669 BPF_ALU64_IMM(BPF_AND
, BPF_REG_6
, 1),
6670 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_6
, (1 << 30) - 1),
6671 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_6
),
6672 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, (1 << 29) - 1),
6673 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 3),
6674 BPF_MOV64_IMM(BPF_REG_0
, 0),
6677 .fixup_map1
= { 4 },
6678 .errstr
= "value 1073741823",
6680 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
6683 "bounds check after truncation of non-boundary-crossing range",
6685 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6686 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6687 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6688 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6689 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6690 BPF_FUNC_map_lookup_elem
),
6691 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6692 /* r1 = [0x00, 0xff] */
6693 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6694 BPF_MOV64_IMM(BPF_REG_2
, 1),
6695 /* r2 = 0x10'0000'0000 */
6696 BPF_ALU64_IMM(BPF_LSH
, BPF_REG_2
, 36),
6697 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6698 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
6699 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6700 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
6701 /* r1 = [0x00, 0xff] */
6702 BPF_ALU32_IMM(BPF_SUB
, BPF_REG_1
, 0x7fffffff),
6704 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
6706 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6707 /* access at offset 0 */
6708 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6710 BPF_MOV64_IMM(BPF_REG_0
, 0),
6713 .fixup_map1
= { 3 },
6717 "bounds check after truncation of boundary-crossing range (1)",
6719 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6720 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6721 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6722 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6723 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6724 BPF_FUNC_map_lookup_elem
),
6725 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6726 /* r1 = [0x00, 0xff] */
6727 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6728 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
6729 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6730 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
6731 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6732 * [0x0000'0000, 0x0000'007f]
6734 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 0),
6735 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
6736 /* r1 = [0x00, 0xff] or
6737 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6739 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
6741 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6743 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
6744 /* no-op or OOB pointer computation */
6745 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6746 /* potentially OOB access */
6747 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6749 BPF_MOV64_IMM(BPF_REG_0
, 0),
6752 .fixup_map1
= { 3 },
6753 /* not actually fully unbounded, but the bound is very high */
6754 .errstr
= "R0 unbounded memory access",
6758 "bounds check after truncation of boundary-crossing range (2)",
6760 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6761 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6762 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6763 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6764 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6765 BPF_FUNC_map_lookup_elem
),
6766 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 9),
6767 /* r1 = [0x00, 0xff] */
6768 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6769 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
6770 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6771 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0xffffff80 >> 1),
6772 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6773 * [0x0000'0000, 0x0000'007f]
6774 * difference to previous test: truncation via MOV32
6777 BPF_MOV32_REG(BPF_REG_1
, BPF_REG_1
),
6778 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
6779 /* r1 = [0x00, 0xff] or
6780 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6782 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 0xffffff80 >> 1),
6784 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6786 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
6787 /* no-op or OOB pointer computation */
6788 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6789 /* potentially OOB access */
6790 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6792 BPF_MOV64_IMM(BPF_REG_0
, 0),
6795 .fixup_map1
= { 3 },
6796 /* not actually fully unbounded, but the bound is very high */
6797 .errstr
= "R0 unbounded memory access",
6801 "bounds check after wrapping 32-bit addition",
6803 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6804 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6805 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6806 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6807 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6808 BPF_FUNC_map_lookup_elem
),
6809 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 5),
6810 /* r1 = 0x7fff'ffff */
6811 BPF_MOV64_IMM(BPF_REG_1
, 0x7fffffff),
6812 /* r1 = 0xffff'fffe */
6813 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
6815 BPF_ALU32_IMM(BPF_ADD
, BPF_REG_1
, 2),
6817 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6818 /* access at offset 0 */
6819 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6821 BPF_MOV64_IMM(BPF_REG_0
, 0),
6824 .fixup_map1
= { 3 },
6828 "bounds check after shift with oversized count operand",
6830 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6831 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6832 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6833 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6834 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6835 BPF_FUNC_map_lookup_elem
),
6836 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6837 BPF_MOV64_IMM(BPF_REG_2
, 32),
6838 BPF_MOV64_IMM(BPF_REG_1
, 1),
6839 /* r1 = (u32)1 << (u32)32 = ? */
6840 BPF_ALU32_REG(BPF_LSH
, BPF_REG_1
, BPF_REG_2
),
6841 /* r1 = [0x0000, 0xffff] */
6842 BPF_ALU64_IMM(BPF_AND
, BPF_REG_1
, 0xffff),
6843 /* computes unknown pointer, potentially OOB */
6844 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6845 /* potentially OOB access */
6846 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6848 BPF_MOV64_IMM(BPF_REG_0
, 0),
6851 .fixup_map1
= { 3 },
6852 .errstr
= "R0 max value is outside of the array range",
6856 "bounds check after right shift of maybe-negative number",
6858 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6859 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6860 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6861 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6862 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6863 BPF_FUNC_map_lookup_elem
),
6864 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 6),
6865 /* r1 = [0x00, 0xff] */
6866 BPF_LDX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
6867 /* r1 = [-0x01, 0xfe] */
6868 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_1
, 1),
6869 /* r1 = 0 or 0xff'ffff'ffff'ffff */
6870 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
6871 /* r1 = 0 or 0xffff'ffff'ffff */
6872 BPF_ALU64_IMM(BPF_RSH
, BPF_REG_1
, 8),
6873 /* computes unknown pointer, potentially OOB */
6874 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6875 /* potentially OOB access */
6876 BPF_LDX_MEM(BPF_B
, BPF_REG_0
, BPF_REG_0
, 0),
6878 BPF_MOV64_IMM(BPF_REG_0
, 0),
6881 .fixup_map1
= { 3 },
6882 .errstr
= "R0 unbounded memory access",
6886 "bounds check map access with off+size signed 32bit overflow. test1",
6888 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6889 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6890 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6891 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6892 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6893 BPF_FUNC_map_lookup_elem
),
6894 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
6896 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x7ffffffe),
6897 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
6901 .fixup_map1
= { 3 },
6902 .errstr
= "map_value pointer and 2147483646",
6906 "bounds check map access with off+size signed 32bit overflow. test2",
6908 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6909 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6910 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6911 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6912 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6913 BPF_FUNC_map_lookup_elem
),
6914 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
6916 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
6917 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
6918 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_0
, 0x1fffffff),
6919 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
6923 .fixup_map1
= { 3 },
6924 .errstr
= "pointer offset 1073741822",
6928 "bounds check map access with off+size signed 32bit overflow. test3",
6930 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6931 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6932 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6933 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6934 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6935 BPF_FUNC_map_lookup_elem
),
6936 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
6938 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 0x1fffffff),
6939 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_0
, 0x1fffffff),
6940 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 2),
6944 .fixup_map1
= { 3 },
6945 .errstr
= "pointer offset -1073741822",
6949 "bounds check map access with off+size signed 32bit overflow. test4",
6951 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6952 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6953 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6954 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6955 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6956 BPF_FUNC_map_lookup_elem
),
6957 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 1),
6959 BPF_MOV64_IMM(BPF_REG_1
, 1000000),
6960 BPF_ALU64_IMM(BPF_MUL
, BPF_REG_1
, 1000000),
6961 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
6962 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 2),
6966 .fixup_map1
= { 3 },
6967 .errstr
= "map_value pointer and 1000000000000",
6971 "pointer/scalar confusion in state equality check (way 1)",
6973 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6974 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6975 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6976 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6977 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6978 BPF_FUNC_map_lookup_elem
),
6979 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 2),
6980 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
6982 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
6986 .fixup_map1
= { 3 },
6988 .result_unpriv
= REJECT
,
6989 .errstr_unpriv
= "R0 leaks addr as return value"
6992 "pointer/scalar confusion in state equality check (way 2)",
6994 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
6995 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
6996 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
6997 BPF_LD_MAP_FD(BPF_REG_1
, 0),
6998 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
6999 BPF_FUNC_map_lookup_elem
),
7000 BPF_JMP_IMM(BPF_JNE
, BPF_REG_0
, 0, 2),
7001 BPF_MOV64_REG(BPF_REG_0
, BPF_REG_10
),
7003 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_0
, 0),
7006 .fixup_map1
= { 3 },
7008 .result_unpriv
= REJECT
,
7009 .errstr_unpriv
= "R0 leaks addr as return value"
7012 "variable-offset ctx access",
7014 /* Get an unknown value */
7015 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7016 /* Make it small and 4-byte aligned */
7017 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7018 /* add it to skb. We now have either &skb->len or
7019 * &skb->pkt_type, but we don't know which
7021 BPF_ALU64_REG(BPF_ADD
, BPF_REG_1
, BPF_REG_2
),
7022 /* dereference it */
7023 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, 0),
7026 .errstr
= "variable ctx access var_off=(0x0; 0x4)",
7028 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7031 "variable-offset stack access",
7033 /* Fill the top 8 bytes of the stack */
7034 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7035 /* Get an unknown value */
7036 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7037 /* Make it small and 4-byte aligned */
7038 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7039 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
7040 /* add it to fp. We now have either fp-4 or fp-8, but
7041 * we don't know which
7043 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
7044 /* dereference it */
7045 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_2
, 0),
7048 .errstr
= "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7050 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7053 "indirect variable-offset stack access",
7055 /* Fill the top 8 bytes of the stack */
7056 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7057 /* Get an unknown value */
7058 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7059 /* Make it small and 4-byte aligned */
7060 BPF_ALU64_IMM(BPF_AND
, BPF_REG_2
, 4),
7061 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_2
, 8),
7062 /* add it to fp. We now have either fp-4 or fp-8, but
7063 * we don't know which
7065 BPF_ALU64_REG(BPF_ADD
, BPF_REG_2
, BPF_REG_10
),
7066 /* dereference it indirectly */
7067 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7068 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7069 BPF_FUNC_map_lookup_elem
),
7070 BPF_MOV64_IMM(BPF_REG_0
, 0),
7073 .fixup_map1
= { 5 },
7074 .errstr
= "variable stack read R2",
7076 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7079 "direct stack access with 32-bit wraparound. test1",
7081 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7082 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7083 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x7fffffff),
7084 BPF_MOV32_IMM(BPF_REG_0
, 0),
7085 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7088 .errstr
= "fp pointer and 2147483647",
7092 "direct stack access with 32-bit wraparound. test2",
7094 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7095 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x3fffffff),
7096 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x3fffffff),
7097 BPF_MOV32_IMM(BPF_REG_0
, 0),
7098 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7101 .errstr
= "fp pointer and 1073741823",
7105 "direct stack access with 32-bit wraparound. test3",
7107 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_10
),
7108 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x1fffffff),
7109 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 0x1fffffff),
7110 BPF_MOV32_IMM(BPF_REG_0
, 0),
7111 BPF_STX_MEM(BPF_B
, BPF_REG_1
, BPF_REG_0
, 0),
7114 .errstr
= "fp pointer offset 1073741822",
7118 "liveness pruning and write screening",
7120 /* Get an unknown value */
7121 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
, 0),
7122 /* branch conditions teach us nothing about R2 */
7123 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
7124 BPF_MOV64_IMM(BPF_REG_0
, 0),
7125 BPF_JMP_IMM(BPF_JGE
, BPF_REG_2
, 0, 1),
7126 BPF_MOV64_IMM(BPF_REG_0
, 0),
7129 .errstr
= "R0 !read_ok",
7131 .prog_type
= BPF_PROG_TYPE_LWT_IN
,
7134 "varlen_map_value_access pruning",
7136 BPF_ST_MEM(BPF_DW
, BPF_REG_10
, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2
, BPF_REG_10
),
7138 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_2
, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1
, 0),
7140 BPF_RAW_INSN(BPF_JMP
| BPF_CALL
, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem
),
7142 BPF_JMP_IMM(BPF_JEQ
, BPF_REG_0
, 0, 8),
7143 BPF_LDX_MEM(BPF_DW
, BPF_REG_1
, BPF_REG_0
, 0),
7144 BPF_MOV32_IMM(BPF_REG_2
, MAX_ENTRIES
),
7145 BPF_JMP_REG(BPF_JSGT
, BPF_REG_2
, BPF_REG_1
, 1),
7146 BPF_MOV32_IMM(BPF_REG_1
, 0),
7147 BPF_ALU32_IMM(BPF_LSH
, BPF_REG_1
, 2),
7148 BPF_ALU64_REG(BPF_ADD
, BPF_REG_0
, BPF_REG_1
),
7149 BPF_JMP_IMM(BPF_JA
, 0, 0, 0),
7150 BPF_ST_MEM(BPF_DW
, BPF_REG_0
, 0,
7151 offsetof(struct test_val
, foo
)),
7154 .fixup_map2
= { 3 },
7155 .errstr_unpriv
= "R0 leaks addr",
7156 .errstr
= "R0 unbounded memory access",
7157 .result_unpriv
= REJECT
,
7159 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7162 "invalid 64-bit BPF_END",
7164 BPF_MOV32_IMM(BPF_REG_0
, 0),
7166 .code
= BPF_ALU64
| BPF_END
| BPF_TO_LE
,
7167 .dst_reg
= BPF_REG_0
,
7174 .errstr
= "BPF_END uses reserved fields",
7178 "arithmetic ops make PTR_TO_CTX unusable",
7180 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
,
7181 offsetof(struct __sk_buff
, data
) -
7182 offsetof(struct __sk_buff
, mark
)),
7183 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
,
7184 offsetof(struct __sk_buff
, mark
)),
7187 .errstr
= "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
7189 .prog_type
= BPF_PROG_TYPE_SCHED_CLS
,
7192 "XDP pkt read, pkt_end mangling, bad access 1",
7194 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7195 offsetof(struct xdp_md
, data
)),
7196 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7197 offsetof(struct xdp_md
, data_end
)),
7198 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7199 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7200 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_3
, 8),
7201 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
7202 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7203 BPF_MOV64_IMM(BPF_REG_0
, 0),
7206 .errstr
= "R1 offset is outside of the packet",
7208 .prog_type
= BPF_PROG_TYPE_XDP
,
7211 "XDP pkt read, pkt_end mangling, bad access 2",
7213 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7214 offsetof(struct xdp_md
, data
)),
7215 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7216 offsetof(struct xdp_md
, data_end
)),
7217 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7218 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7219 BPF_ALU64_IMM(BPF_SUB
, BPF_REG_3
, 8),
7220 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
7221 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7222 BPF_MOV64_IMM(BPF_REG_0
, 0),
7225 .errstr
= "R1 offset is outside of the packet",
7227 .prog_type
= BPF_PROG_TYPE_XDP
,
7230 "XDP pkt read, pkt_data' > pkt_end, good access",
7232 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7233 offsetof(struct xdp_md
, data
)),
7234 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7235 offsetof(struct xdp_md
, data_end
)),
7236 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7237 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7238 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
7239 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7240 BPF_MOV64_IMM(BPF_REG_0
, 0),
7244 .prog_type
= BPF_PROG_TYPE_XDP
,
7247 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
7249 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7250 offsetof(struct xdp_md
, data
)),
7251 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7252 offsetof(struct xdp_md
, data_end
)),
7253 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7254 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7255 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 1),
7256 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
7257 BPF_MOV64_IMM(BPF_REG_0
, 0),
7260 .errstr
= "R1 offset is outside of the packet",
7262 .prog_type
= BPF_PROG_TYPE_XDP
,
7263 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7266 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
7268 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7269 offsetof(struct xdp_md
, data
)),
7270 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7271 offsetof(struct xdp_md
, data_end
)),
7272 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7273 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7274 BPF_JMP_REG(BPF_JGT
, BPF_REG_1
, BPF_REG_3
, 0),
7275 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7276 BPF_MOV64_IMM(BPF_REG_0
, 0),
7279 .errstr
= "R1 offset is outside of the packet",
7281 .prog_type
= BPF_PROG_TYPE_XDP
,
7284 "XDP pkt read, pkt_end > pkt_data', good access",
7286 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7287 offsetof(struct xdp_md
, data
)),
7288 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7289 offsetof(struct xdp_md
, data_end
)),
7290 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7291 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7292 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
7293 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7294 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
7295 BPF_MOV64_IMM(BPF_REG_0
, 0),
7299 .prog_type
= BPF_PROG_TYPE_XDP
,
7300 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7303 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7305 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7306 offsetof(struct xdp_md
, data
)),
7307 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7308 offsetof(struct xdp_md
, data_end
)),
7309 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7310 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7311 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
7312 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7313 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7314 BPF_MOV64_IMM(BPF_REG_0
, 0),
7317 .errstr
= "R1 offset is outside of the packet",
7319 .prog_type
= BPF_PROG_TYPE_XDP
,
7322 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7324 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7325 offsetof(struct xdp_md
, data
)),
7326 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7327 offsetof(struct xdp_md
, data_end
)),
7328 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7329 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7330 BPF_JMP_REG(BPF_JGT
, BPF_REG_3
, BPF_REG_1
, 1),
7331 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7332 BPF_MOV64_IMM(BPF_REG_0
, 0),
7335 .errstr
= "R1 offset is outside of the packet",
7337 .prog_type
= BPF_PROG_TYPE_XDP
,
7340 "XDP pkt read, pkt_data' < pkt_end, good access",
7342 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7343 offsetof(struct xdp_md
, data
)),
7344 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7345 offsetof(struct xdp_md
, data_end
)),
7346 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7347 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7348 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
7349 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7350 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
7351 BPF_MOV64_IMM(BPF_REG_0
, 0),
7355 .prog_type
= BPF_PROG_TYPE_XDP
,
7356 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7359 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7361 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7362 offsetof(struct xdp_md
, data
)),
7363 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7364 offsetof(struct xdp_md
, data_end
)),
7365 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7366 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7367 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
7368 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7369 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7370 BPF_MOV64_IMM(BPF_REG_0
, 0),
7373 .errstr
= "R1 offset is outside of the packet",
7375 .prog_type
= BPF_PROG_TYPE_XDP
,
7378 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7380 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7381 offsetof(struct xdp_md
, data
)),
7382 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7383 offsetof(struct xdp_md
, data_end
)),
7384 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7385 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7386 BPF_JMP_REG(BPF_JLT
, BPF_REG_1
, BPF_REG_3
, 1),
7387 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7388 BPF_MOV64_IMM(BPF_REG_0
, 0),
7391 .errstr
= "R1 offset is outside of the packet",
7393 .prog_type
= BPF_PROG_TYPE_XDP
,
7396 "XDP pkt read, pkt_end < pkt_data', good access",
7398 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7399 offsetof(struct xdp_md
, data
)),
7400 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7401 offsetof(struct xdp_md
, data_end
)),
7402 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7403 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7404 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
7405 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7406 BPF_MOV64_IMM(BPF_REG_0
, 0),
7410 .prog_type
= BPF_PROG_TYPE_XDP
,
7413 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7415 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7416 offsetof(struct xdp_md
, data
)),
7417 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7418 offsetof(struct xdp_md
, data_end
)),
7419 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7420 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7421 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 1),
7422 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
7423 BPF_MOV64_IMM(BPF_REG_0
, 0),
7426 .errstr
= "R1 offset is outside of the packet",
7428 .prog_type
= BPF_PROG_TYPE_XDP
,
7429 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7432 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7434 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7435 offsetof(struct xdp_md
, data
)),
7436 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7437 offsetof(struct xdp_md
, data_end
)),
7438 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7439 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7440 BPF_JMP_REG(BPF_JLT
, BPF_REG_3
, BPF_REG_1
, 0),
7441 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7442 BPF_MOV64_IMM(BPF_REG_0
, 0),
7445 .errstr
= "R1 offset is outside of the packet",
7447 .prog_type
= BPF_PROG_TYPE_XDP
,
7450 "XDP pkt read, pkt_data' >= pkt_end, good access",
7452 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7453 offsetof(struct xdp_md
, data
)),
7454 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7455 offsetof(struct xdp_md
, data_end
)),
7456 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7457 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7458 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
7459 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
7460 BPF_MOV64_IMM(BPF_REG_0
, 0),
7464 .prog_type
= BPF_PROG_TYPE_XDP
,
7465 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7468 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7470 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7471 offsetof(struct xdp_md
, data
)),
7472 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7473 offsetof(struct xdp_md
, data_end
)),
7474 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7475 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7476 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 1),
7477 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7478 BPF_MOV64_IMM(BPF_REG_0
, 0),
7481 .errstr
= "R1 offset is outside of the packet",
7483 .prog_type
= BPF_PROG_TYPE_XDP
,
7486 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
7488 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7489 offsetof(struct xdp_md
, data
)),
7490 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7491 offsetof(struct xdp_md
, data_end
)),
7492 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7493 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7494 BPF_JMP_REG(BPF_JGE
, BPF_REG_1
, BPF_REG_3
, 0),
7495 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
7496 BPF_MOV64_IMM(BPF_REG_0
, 0),
7499 .errstr
= "R1 offset is outside of the packet",
7501 .prog_type
= BPF_PROG_TYPE_XDP
,
7502 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7505 "XDP pkt read, pkt_end >= pkt_data', good access",
7507 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7508 offsetof(struct xdp_md
, data
)),
7509 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7510 offsetof(struct xdp_md
, data_end
)),
7511 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7512 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7513 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
7514 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7515 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7516 BPF_MOV64_IMM(BPF_REG_0
, 0),
7520 .prog_type
= BPF_PROG_TYPE_XDP
,
7523 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7525 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7526 offsetof(struct xdp_md
, data
)),
7527 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7528 offsetof(struct xdp_md
, data_end
)),
7529 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7530 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7531 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
7532 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7533 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
7534 BPF_MOV64_IMM(BPF_REG_0
, 0),
7537 .errstr
= "R1 offset is outside of the packet",
7539 .prog_type
= BPF_PROG_TYPE_XDP
,
7540 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7543 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7545 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7546 offsetof(struct xdp_md
, data
)),
7547 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7548 offsetof(struct xdp_md
, data_end
)),
7549 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7550 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7551 BPF_JMP_REG(BPF_JGE
, BPF_REG_3
, BPF_REG_1
, 1),
7552 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7553 BPF_MOV64_IMM(BPF_REG_0
, 0),
7556 .errstr
= "R1 offset is outside of the packet",
7558 .prog_type
= BPF_PROG_TYPE_XDP
,
7561 "XDP pkt read, pkt_data' <= pkt_end, good access",
7563 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7564 offsetof(struct xdp_md
, data
)),
7565 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7566 offsetof(struct xdp_md
, data_end
)),
7567 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7568 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7569 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
7570 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7571 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7572 BPF_MOV64_IMM(BPF_REG_0
, 0),
7576 .prog_type
= BPF_PROG_TYPE_XDP
,
7579 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7581 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7582 offsetof(struct xdp_md
, data
)),
7583 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7584 offsetof(struct xdp_md
, data_end
)),
7585 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7586 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7587 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
7588 BPF_JMP_IMM(BPF_JA
, 0, 0, 1),
7589 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -4),
7590 BPF_MOV64_IMM(BPF_REG_0
, 0),
7593 .errstr
= "R1 offset is outside of the packet",
7595 .prog_type
= BPF_PROG_TYPE_XDP
,
7596 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7599 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7601 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7602 offsetof(struct xdp_md
, data
)),
7603 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7604 offsetof(struct xdp_md
, data_end
)),
7605 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7606 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7607 BPF_JMP_REG(BPF_JLE
, BPF_REG_1
, BPF_REG_3
, 1),
7608 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7609 BPF_MOV64_IMM(BPF_REG_0
, 0),
7612 .errstr
= "R1 offset is outside of the packet",
7614 .prog_type
= BPF_PROG_TYPE_XDP
,
7617 "XDP pkt read, pkt_end <= pkt_data', good access",
7619 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7620 offsetof(struct xdp_md
, data
)),
7621 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7622 offsetof(struct xdp_md
, data_end
)),
7623 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7624 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7625 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
7626 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
7627 BPF_MOV64_IMM(BPF_REG_0
, 0),
7631 .prog_type
= BPF_PROG_TYPE_XDP
,
7632 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7635 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7637 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7638 offsetof(struct xdp_md
, data
)),
7639 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7640 offsetof(struct xdp_md
, data_end
)),
7641 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7642 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7643 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 1),
7644 BPF_LDX_MEM(BPF_DW
, BPF_REG_0
, BPF_REG_1
, -8),
7645 BPF_MOV64_IMM(BPF_REG_0
, 0),
7648 .errstr
= "R1 offset is outside of the packet",
7650 .prog_type
= BPF_PROG_TYPE_XDP
,
7653 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7655 BPF_LDX_MEM(BPF_W
, BPF_REG_2
, BPF_REG_1
,
7656 offsetof(struct xdp_md
, data
)),
7657 BPF_LDX_MEM(BPF_W
, BPF_REG_3
, BPF_REG_1
,
7658 offsetof(struct xdp_md
, data_end
)),
7659 BPF_MOV64_REG(BPF_REG_1
, BPF_REG_2
),
7660 BPF_ALU64_IMM(BPF_ADD
, BPF_REG_1
, 8),
7661 BPF_JMP_REG(BPF_JLE
, BPF_REG_3
, BPF_REG_1
, 0),
7662 BPF_LDX_MEM(BPF_W
, BPF_REG_0
, BPF_REG_1
, -5),
7663 BPF_MOV64_IMM(BPF_REG_0
, 0),
7666 .errstr
= "R1 offset is outside of the packet",
7668 .prog_type
= BPF_PROG_TYPE_XDP
,
7669 .flags
= F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
,
7673 static int probe_filter_length(const struct bpf_insn
*fp
)
7677 for (len
= MAX_INSNS
- 1; len
> 0; --len
)
7678 if (fp
[len
].code
!= 0 || fp
[len
].imm
!= 0)
7683 static int create_map(uint32_t size_value
, uint32_t max_elem
)
7687 fd
= bpf_create_map(BPF_MAP_TYPE_HASH
, sizeof(long long),
7688 size_value
, max_elem
, BPF_F_NO_PREALLOC
);
7690 printf("Failed to create hash map '%s'!\n", strerror(errno
));
7695 static int create_prog_array(void)
7699 fd
= bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY
, sizeof(int),
7702 printf("Failed to create prog array '%s'!\n", strerror(errno
));
7707 static int create_map_in_map(void)
7709 int inner_map_fd
, outer_map_fd
;
7711 inner_map_fd
= bpf_create_map(BPF_MAP_TYPE_ARRAY
, sizeof(int),
7713 if (inner_map_fd
< 0) {
7714 printf("Failed to create array '%s'!\n", strerror(errno
));
7715 return inner_map_fd
;
7718 outer_map_fd
= bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS
,
7719 sizeof(int), inner_map_fd
, 1, 0);
7720 if (outer_map_fd
< 0)
7721 printf("Failed to create array of maps '%s'!\n",
7724 close(inner_map_fd
);
7726 return outer_map_fd
;
/* Capture buffer for the kernel verifier log, filled by bpf_verify_program()
 * in do_test_single() and dumped on test failure.
 */
static char bpf_vlog[32768];
7731 static void do_test_fixup(struct bpf_test
*test
, struct bpf_insn
*prog
,
7734 int *fixup_map1
= test
->fixup_map1
;
7735 int *fixup_map2
= test
->fixup_map2
;
7736 int *fixup_prog
= test
->fixup_prog
;
7737 int *fixup_map_in_map
= test
->fixup_map_in_map
;
7739 /* Allocating HTs with 1 elem is fine here, since we only test
7740 * for verifier and not do a runtime lookup, so the only thing
7741 * that really matters is value size in this case.
7744 map_fds
[0] = create_map(sizeof(long long), 1);
7746 prog
[*fixup_map1
].imm
= map_fds
[0];
7748 } while (*fixup_map1
);
7752 map_fds
[1] = create_map(sizeof(struct test_val
), 1);
7754 prog
[*fixup_map2
].imm
= map_fds
[1];
7756 } while (*fixup_map2
);
7760 map_fds
[2] = create_prog_array();
7762 prog
[*fixup_prog
].imm
= map_fds
[2];
7764 } while (*fixup_prog
);
7767 if (*fixup_map_in_map
) {
7768 map_fds
[3] = create_map_in_map();
7770 prog
[*fixup_map_in_map
].imm
= map_fds
[3];
7772 } while (*fixup_map_in_map
);
7776 static void do_test_single(struct bpf_test
*test
, bool unpriv
,
7777 int *passes
, int *errors
)
7779 int fd_prog
, expected_ret
, reject_from_alignment
;
7780 struct bpf_insn
*prog
= test
->insns
;
7781 int prog_len
= probe_filter_length(prog
);
7782 int prog_type
= test
->prog_type
;
7783 int map_fds
[MAX_NR_MAPS
];
7784 const char *expected_err
;
7787 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
7790 do_test_fixup(test
, prog
, map_fds
);
7792 fd_prog
= bpf_verify_program(prog_type
? : BPF_PROG_TYPE_SOCKET_FILTER
,
7793 prog
, prog_len
, test
->flags
& F_LOAD_WITH_STRICT_ALIGNMENT
,
7794 "GPL", 0, bpf_vlog
, sizeof(bpf_vlog
), 1);
7796 expected_ret
= unpriv
&& test
->result_unpriv
!= UNDEF
?
7797 test
->result_unpriv
: test
->result
;
7798 expected_err
= unpriv
&& test
->errstr_unpriv
?
7799 test
->errstr_unpriv
: test
->errstr
;
7801 reject_from_alignment
= fd_prog
< 0 &&
7802 (test
->flags
& F_NEEDS_EFFICIENT_UNALIGNED_ACCESS
) &&
7803 strstr(bpf_vlog
, "Unknown alignment.");
7804 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
7805 if (reject_from_alignment
) {
7806 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
7811 if (expected_ret
== ACCEPT
) {
7812 if (fd_prog
< 0 && !reject_from_alignment
) {
7813 printf("FAIL\nFailed to load prog '%s'!\n",
7819 printf("FAIL\nUnexpected success to load!\n");
7822 if (!strstr(bpf_vlog
, expected_err
) && !reject_from_alignment
) {
7823 printf("FAIL\nUnexpected error message!\n");
7829 printf("OK%s\n", reject_from_alignment
?
7830 " (NOTE: reject due to unknown alignment)" : "");
7833 for (i
= 0; i
< MAX_NR_MAPS
; i
++)
7839 printf("%s", bpf_vlog
);
/* Report whether the process currently holds effective CAP_SYS_ADMIN.
 *
 * Used to decide between privileged and unprivileged test runs. On any
 * libcap failure the error is printed and the function conservatively
 * behaves as "not admin" (sysadmin stays CAP_CLEAR).
 */
static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}
/* Raise or drop effective CAP_SYS_ADMIN for the current process.
 *
 * @admin: true to set the capability, false to clear it.
 *
 * Returns 0 on success, -1 on any libcap failure (error printed). The
 * cap_t handle is always freed via the goto-cleanup path.
 */
static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
			 admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
7894 static int do_test(bool unpriv
, unsigned int from
, unsigned int to
)
7896 int i
, passes
= 0, errors
= 0;
7898 for (i
= from
; i
< to
; i
++) {
7899 struct bpf_test
*test
= &tests
[i
];
7901 /* Program types that are not supported by non-root we
7904 if (!test
->prog_type
) {
7907 printf("#%d/u %s ", i
, test
->descr
);
7908 do_test_single(test
, true, &passes
, &errors
);
7914 printf("#%d/p %s ", i
, test
->descr
);
7915 do_test_single(test
, false, &passes
, &errors
);
7919 printf("Summary: %d PASSED, %d FAILED\n", passes
, errors
);
7920 return errors
? EXIT_FAILURE
: EXIT_SUCCESS
;
7923 int main(int argc
, char **argv
)
7925 struct rlimit rinf
= { RLIM_INFINITY
, RLIM_INFINITY
};
7926 struct rlimit rlim
= { 1 << 20, 1 << 20 };
7927 unsigned int from
= 0, to
= ARRAY_SIZE(tests
);
7928 bool unpriv
= !is_admin();
7931 unsigned int l
= atoi(argv
[argc
- 2]);
7932 unsigned int u
= atoi(argv
[argc
- 1]);
7934 if (l
< to
&& u
< to
) {
7938 } else if (argc
== 2) {
7939 unsigned int t
= atoi(argv
[argc
- 1]);
7947 setrlimit(RLIMIT_MEMLOCK
, unpriv
? &rlim
: &rinf
);
7948 return do_test(unpriv
, from
, to
);