1 /*
2 * Testsuite for eBPF verifier
3 *
4 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 */
10
11 #include <endian.h>
12 #include <asm/types.h>
13 #include <linux/types.h>
14 #include <stdint.h>
15 #include <stdio.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 #include <errno.h>
19 #include <string.h>
20 #include <stddef.h>
21 #include <stdbool.h>
22 #include <sched.h>
23
24 #include <sys/capability.h>
25 #include <sys/resource.h>
26
27 #include <linux/unistd.h>
28 #include <linux/filter.h>
29 #include <linux/bpf_perf_event.h>
30 #include <linux/bpf.h>
31
32 #include <bpf/bpf.h>
33
34 #ifdef HAVE_GENHDR
35 # include "autoconf.h"
36 #else
37 # if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
38 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
39 # endif
40 #endif
41
42 #include "../../../include/linux/filter.h"
43
44 #ifndef ARRAY_SIZE
45 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
46 #endif
47
48 #define MAX_INSNS 512
49 #define MAX_FIXUPS 8
50 #define MAX_NR_MAPS 4
51
52 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
53 #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)
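/* A brief sketch, based on how the test runner later in this file consumes
 * .flags: F_NEEDS_EFFICIENT_UNALIGNED_ACCESS marks tests whose expected
 * result only holds when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set for
 * the architecture (see the #ifdef block above), while
 * F_LOAD_WITH_STRICT_ALIGNMENT requests loading the program with the
 * BPF_F_STRICT_ALIGNMENT flag so that misaligned accesses are reported even
 * on such architectures.
 */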
54
55 struct bpf_test {
56 const char *descr;
57 struct bpf_insn insns[MAX_INSNS];
58 int fixup_map1[MAX_FIXUPS];
59 int fixup_map2[MAX_FIXUPS];
60 int fixup_prog[MAX_FIXUPS];
61 int fixup_map_in_map[MAX_FIXUPS];
62 const char *errstr;
63 const char *errstr_unpriv;
64 enum {
65 UNDEF,
66 ACCEPT,
67 REJECT
68 } result, result_unpriv;
69 enum bpf_prog_type prog_type;
70 uint8_t flags;
71 };
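/* A rough sketch of how these fields are consumed by the runner further down
 * in this file: .insns is the program under test; the fixup_* arrays hold
 * instruction indices whose placeholder map fds (e.g. BPF_LD_MAP_FD(reg, 0))
 * are patched with descriptors of freshly created maps before loading;
 * .errstr and .errstr_unpriv are substrings expected in the verifier log; and
 * .result/.result_unpriv give the expected verdict with and without
 * privileges.
 */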
72
73 /* Note we want this to be 64 bit aligned so that the end of our array is
74 * actually the end of the structure.
75 */
76 #define MAX_ENTRIES 11
77
78 struct test_val {
79 unsigned int index;
80 int foo[MAX_ENTRIES];
81 };
82
83 static struct bpf_test tests[] = {
84 {
85 "add+sub+mul",
86 .insns = {
87 BPF_MOV64_IMM(BPF_REG_1, 1),
88 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
89 BPF_MOV64_IMM(BPF_REG_2, 3),
90 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
91 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
92 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
93 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
94 BPF_EXIT_INSN(),
95 },
96 .result = ACCEPT,
97 },
98 {
99 "unreachable",
100 .insns = {
101 BPF_EXIT_INSN(),
102 BPF_EXIT_INSN(),
103 },
104 .errstr = "unreachable",
105 .result = REJECT,
106 },
107 {
108 "unreachable2",
109 .insns = {
110 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
111 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
112 BPF_EXIT_INSN(),
113 },
114 .errstr = "unreachable",
115 .result = REJECT,
116 },
117 {
118 "out of range jump",
119 .insns = {
120 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
121 BPF_EXIT_INSN(),
122 },
123 .errstr = "jump out of range",
124 .result = REJECT,
125 },
126 {
127 "out of range jump2",
128 .insns = {
129 BPF_JMP_IMM(BPF_JA, 0, 0, -2),
130 BPF_EXIT_INSN(),
131 },
132 .errstr = "jump out of range",
133 .result = REJECT,
134 },
135 {
136 "test1 ld_imm64",
137 .insns = {
138 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
139 BPF_LD_IMM64(BPF_REG_0, 0),
140 BPF_LD_IMM64(BPF_REG_0, 0),
141 BPF_LD_IMM64(BPF_REG_0, 1),
142 BPF_LD_IMM64(BPF_REG_0, 1),
143 BPF_MOV64_IMM(BPF_REG_0, 2),
144 BPF_EXIT_INSN(),
145 },
146 .errstr = "invalid BPF_LD_IMM insn",
147 .errstr_unpriv = "R1 pointer comparison",
148 .result = REJECT,
149 },
150 {
151 "test2 ld_imm64",
152 .insns = {
153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
154 BPF_LD_IMM64(BPF_REG_0, 0),
155 BPF_LD_IMM64(BPF_REG_0, 0),
156 BPF_LD_IMM64(BPF_REG_0, 1),
157 BPF_LD_IMM64(BPF_REG_0, 1),
158 BPF_EXIT_INSN(),
159 },
160 .errstr = "invalid BPF_LD_IMM insn",
161 .errstr_unpriv = "R1 pointer comparison",
162 .result = REJECT,
163 },
164 {
165 "test3 ld_imm64",
166 .insns = {
167 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
168 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
169 BPF_LD_IMM64(BPF_REG_0, 0),
170 BPF_LD_IMM64(BPF_REG_0, 0),
171 BPF_LD_IMM64(BPF_REG_0, 1),
172 BPF_LD_IMM64(BPF_REG_0, 1),
173 BPF_EXIT_INSN(),
174 },
175 .errstr = "invalid bpf_ld_imm64 insn",
176 .result = REJECT,
177 },
178 {
179 "test4 ld_imm64",
180 .insns = {
181 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
182 BPF_EXIT_INSN(),
183 },
184 .errstr = "invalid bpf_ld_imm64 insn",
185 .result = REJECT,
186 },
187 {
188 "test5 ld_imm64",
189 .insns = {
190 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
191 },
192 .errstr = "invalid bpf_ld_imm64 insn",
193 .result = REJECT,
194 },
195 {
196 "test6 ld_imm64",
197 .insns = {
198 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
199 BPF_RAW_INSN(0, 0, 0, 0, 0),
200 BPF_EXIT_INSN(),
201 },
202 .result = ACCEPT,
203 },
204 {
205 "test7 ld_imm64",
206 .insns = {
207 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
208 BPF_RAW_INSN(0, 0, 0, 0, 1),
209 BPF_EXIT_INSN(),
210 },
211 .result = ACCEPT,
212 },
213 {
214 "test8 ld_imm64",
215 .insns = {
216 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1),
217 BPF_RAW_INSN(0, 0, 0, 0, 1),
218 BPF_EXIT_INSN(),
219 },
220 .errstr = "uses reserved fields",
221 .result = REJECT,
222 },
223 {
224 "test9 ld_imm64",
225 .insns = {
226 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
227 BPF_RAW_INSN(0, 0, 0, 1, 1),
228 BPF_EXIT_INSN(),
229 },
230 .errstr = "invalid bpf_ld_imm64 insn",
231 .result = REJECT,
232 },
233 {
234 "test10 ld_imm64",
235 .insns = {
236 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
237 BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1),
238 BPF_EXIT_INSN(),
239 },
240 .errstr = "invalid bpf_ld_imm64 insn",
241 .result = REJECT,
242 },
243 {
244 "test11 ld_imm64",
245 .insns = {
246 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1),
247 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
248 BPF_EXIT_INSN(),
249 },
250 .errstr = "invalid bpf_ld_imm64 insn",
251 .result = REJECT,
252 },
253 {
254 "test12 ld_imm64",
255 .insns = {
256 BPF_MOV64_IMM(BPF_REG_1, 0),
257 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
258 BPF_RAW_INSN(0, 0, 0, 0, 1),
259 BPF_EXIT_INSN(),
260 },
261 .errstr = "not pointing to valid bpf_map",
262 .result = REJECT,
263 },
264 {
265 "test13 ld_imm64",
266 .insns = {
267 BPF_MOV64_IMM(BPF_REG_1, 0),
268 BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1),
269 BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1),
270 BPF_EXIT_INSN(),
271 },
272 .errstr = "invalid bpf_ld_imm64 insn",
273 .result = REJECT,
274 },
275 {
276 "no bpf_exit",
277 .insns = {
278 BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
279 },
280 .errstr = "jump out of range",
281 .result = REJECT,
282 },
283 {
284 "loop (back-edge)",
285 .insns = {
286 BPF_JMP_IMM(BPF_JA, 0, 0, -1),
287 BPF_EXIT_INSN(),
288 },
289 .errstr = "back-edge",
290 .result = REJECT,
291 },
292 {
293 "loop2 (back-edge)",
294 .insns = {
295 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
296 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
297 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
298 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
299 BPF_EXIT_INSN(),
300 },
301 .errstr = "back-edge",
302 .result = REJECT,
303 },
304 {
305 "conditional loop",
306 .insns = {
307 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
308 BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
309 BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
310 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
311 BPF_EXIT_INSN(),
312 },
313 .errstr = "back-edge",
314 .result = REJECT,
315 },
316 {
317 "read uninitialized register",
318 .insns = {
319 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
320 BPF_EXIT_INSN(),
321 },
322 .errstr = "R2 !read_ok",
323 .result = REJECT,
324 },
325 {
326 "read invalid register",
327 .insns = {
328 BPF_MOV64_REG(BPF_REG_0, -1),
329 BPF_EXIT_INSN(),
330 },
331 .errstr = "R15 is invalid",
332 .result = REJECT,
333 },
334 {
335 "program doesn't init R0 before exit",
336 .insns = {
337 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
338 BPF_EXIT_INSN(),
339 },
340 .errstr = "R0 !read_ok",
341 .result = REJECT,
342 },
343 {
344 "program doesn't init R0 before exit in all branches",
345 .insns = {
346 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
347 BPF_MOV64_IMM(BPF_REG_0, 1),
348 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
349 BPF_EXIT_INSN(),
350 },
351 .errstr = "R0 !read_ok",
352 .errstr_unpriv = "R1 pointer comparison",
353 .result = REJECT,
354 },
355 {
356 "stack out of bounds",
357 .insns = {
358 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
359 BPF_EXIT_INSN(),
360 },
361 .errstr = "invalid stack",
362 .result = REJECT,
363 },
364 {
365 "invalid call insn1",
366 .insns = {
367 BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
368 BPF_EXIT_INSN(),
369 },
370 .errstr = "BPF_CALL uses reserved",
371 .result = REJECT,
372 },
373 {
374 "invalid call insn2",
375 .insns = {
376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
377 BPF_EXIT_INSN(),
378 },
379 .errstr = "BPF_CALL uses reserved",
380 .result = REJECT,
381 },
382 {
383 "invalid function call",
384 .insns = {
385 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
386 BPF_EXIT_INSN(),
387 },
388 .errstr = "invalid func unknown#1234567",
389 .result = REJECT,
390 },
391 {
392 "uninitialized stack1",
393 .insns = {
394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
396 BPF_LD_MAP_FD(BPF_REG_1, 0),
397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
398 BPF_FUNC_map_lookup_elem),
399 BPF_EXIT_INSN(),
400 },
401 .fixup_map1 = { 2 },
402 .errstr = "invalid indirect read from stack",
403 .result = REJECT,
404 },
405 {
406 "uninitialized stack2",
407 .insns = {
408 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
409 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
410 BPF_EXIT_INSN(),
411 },
412 .errstr = "invalid read from stack",
413 .result = REJECT,
414 },
415 {
416 "invalid fp arithmetic",
417 	/* If this ever gets changed, make sure JITs can deal with it. */
418 .insns = {
419 BPF_MOV64_IMM(BPF_REG_0, 0),
420 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
421 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 BPF_EXIT_INSN(),
424 },
425 .errstr_unpriv = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
428 .result = REJECT,
429 },
430 {
431 "non-invalid fp arithmetic",
432 .insns = {
433 BPF_MOV64_IMM(BPF_REG_0, 0),
434 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
435 BPF_EXIT_INSN(),
436 },
437 .result = ACCEPT,
438 },
439 {
440 "invalid argument register",
441 .insns = {
442 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
443 BPF_FUNC_get_cgroup_classid),
444 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
445 BPF_FUNC_get_cgroup_classid),
446 BPF_EXIT_INSN(),
447 },
448 .errstr = "R1 !read_ok",
449 .result = REJECT,
450 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
451 },
452 {
453 "non-invalid argument register",
454 .insns = {
455 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
456 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
457 BPF_FUNC_get_cgroup_classid),
458 BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
459 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
460 BPF_FUNC_get_cgroup_classid),
461 BPF_EXIT_INSN(),
462 },
463 .result = ACCEPT,
464 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
465 },
466 {
467 "check valid spill/fill",
468 .insns = {
469 /* spill R1(ctx) into stack */
470 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
471 /* fill it back into R2 */
472 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
473 /* should be able to access R0 = *(R2 + 8) */
474 /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
475 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
476 BPF_EXIT_INSN(),
477 },
478 .errstr_unpriv = "R0 leaks addr",
479 .result = ACCEPT,
480 .result_unpriv = REJECT,
481 },
482 {
483 "check valid spill/fill, skb mark",
484 .insns = {
485 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
486 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
487 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
488 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
489 offsetof(struct __sk_buff, mark)),
490 BPF_EXIT_INSN(),
491 },
492 .result = ACCEPT,
493 .result_unpriv = ACCEPT,
494 },
495 {
496 "check corrupted spill/fill",
497 .insns = {
498 /* spill R1(ctx) into stack */
499 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
500 	/* mess with the spilled R1 pointer on the stack */
501 BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
502 /* fill back into R0 should fail */
503 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
504 BPF_EXIT_INSN(),
505 },
506 .errstr_unpriv = "attempt to corrupt spilled",
507 .errstr = "corrupted spill",
508 .result = REJECT,
509 },
510 {
511 "invalid src register in STX",
512 .insns = {
513 BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
514 BPF_EXIT_INSN(),
515 },
516 .errstr = "R15 is invalid",
517 .result = REJECT,
518 },
519 {
520 "invalid dst register in STX",
521 .insns = {
522 BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
523 BPF_EXIT_INSN(),
524 },
525 .errstr = "R14 is invalid",
526 .result = REJECT,
527 },
528 {
529 "invalid dst register in ST",
530 .insns = {
531 BPF_ST_MEM(BPF_B, 14, -1, -1),
532 BPF_EXIT_INSN(),
533 },
534 .errstr = "R14 is invalid",
535 .result = REJECT,
536 },
537 {
538 "invalid src register in LDX",
539 .insns = {
540 BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
541 BPF_EXIT_INSN(),
542 },
543 .errstr = "R12 is invalid",
544 .result = REJECT,
545 },
546 {
547 "invalid dst register in LDX",
548 .insns = {
549 BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
550 BPF_EXIT_INSN(),
551 },
552 .errstr = "R11 is invalid",
553 .result = REJECT,
554 },
555 {
556 "junk insn",
557 .insns = {
558 BPF_RAW_INSN(0, 0, 0, 0, 0),
559 BPF_EXIT_INSN(),
560 },
561 .errstr = "invalid BPF_LD_IMM",
562 .result = REJECT,
563 },
564 {
565 "junk insn2",
566 .insns = {
567 BPF_RAW_INSN(1, 0, 0, 0, 0),
568 BPF_EXIT_INSN(),
569 },
570 .errstr = "BPF_LDX uses reserved fields",
571 .result = REJECT,
572 },
573 {
574 "junk insn3",
575 .insns = {
576 BPF_RAW_INSN(-1, 0, 0, 0, 0),
577 BPF_EXIT_INSN(),
578 },
579 .errstr = "invalid BPF_ALU opcode f0",
580 .result = REJECT,
581 },
582 {
583 "junk insn4",
584 .insns = {
585 BPF_RAW_INSN(-1, -1, -1, -1, -1),
586 BPF_EXIT_INSN(),
587 },
588 .errstr = "invalid BPF_ALU opcode f0",
589 .result = REJECT,
590 },
591 {
592 "junk insn5",
593 .insns = {
594 BPF_RAW_INSN(0x7f, -1, -1, -1, -1),
595 BPF_EXIT_INSN(),
596 },
597 .errstr = "BPF_ALU uses reserved fields",
598 .result = REJECT,
599 },
600 {
601 "misaligned read from stack",
602 .insns = {
603 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
604 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
605 BPF_EXIT_INSN(),
606 },
607 .errstr = "misaligned stack access",
608 .result = REJECT,
609 },
610 {
611 "invalid map_fd for function call",
612 .insns = {
613 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
614 BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10),
615 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
616 BPF_LD_MAP_FD(BPF_REG_1, 0),
617 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
618 BPF_FUNC_map_delete_elem),
619 BPF_EXIT_INSN(),
620 },
621 .errstr = "fd 0 is not pointing to valid bpf_map",
622 .result = REJECT,
623 },
624 {
625 "don't check return value before access",
626 .insns = {
627 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
628 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
629 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
630 BPF_LD_MAP_FD(BPF_REG_1, 0),
631 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
632 BPF_FUNC_map_lookup_elem),
633 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
634 BPF_EXIT_INSN(),
635 },
636 .fixup_map1 = { 3 },
637 .errstr = "R0 invalid mem access 'map_value_or_null'",
638 .result = REJECT,
639 },
640 {
641 "access memory with incorrect alignment",
642 .insns = {
643 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
644 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
646 BPF_LD_MAP_FD(BPF_REG_1, 0),
647 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
648 BPF_FUNC_map_lookup_elem),
649 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
650 BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0),
651 BPF_EXIT_INSN(),
652 },
653 .fixup_map1 = { 3 },
654 .errstr = "misaligned value access",
655 .result = REJECT,
656 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
657 },
658 {
659 "sometimes access memory with incorrect alignment",
660 .insns = {
661 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
662 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
663 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
664 BPF_LD_MAP_FD(BPF_REG_1, 0),
665 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
666 BPF_FUNC_map_lookup_elem),
667 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
668 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
669 BPF_EXIT_INSN(),
670 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
671 BPF_EXIT_INSN(),
672 },
673 .fixup_map1 = { 3 },
674 .errstr = "R0 invalid mem access",
675 .errstr_unpriv = "R0 leaks addr",
676 .result = REJECT,
677 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
678 },
679 {
680 "jump test 1",
681 .insns = {
682 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
683 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -8),
684 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
685 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
686 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
687 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 1),
688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 1),
689 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 2),
690 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 1),
691 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 3),
692 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 1),
693 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 4),
694 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
695 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 5),
696 BPF_MOV64_IMM(BPF_REG_0, 0),
697 BPF_EXIT_INSN(),
698 },
699 .errstr_unpriv = "R1 pointer comparison",
700 .result_unpriv = REJECT,
701 .result = ACCEPT,
702 },
703 {
704 "jump test 2",
705 .insns = {
706 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
707 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
708 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
709 BPF_JMP_IMM(BPF_JA, 0, 0, 14),
710 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 2),
711 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
712 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
713 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 2),
714 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
715 BPF_JMP_IMM(BPF_JA, 0, 0, 8),
716 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 2),
717 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
718 BPF_JMP_IMM(BPF_JA, 0, 0, 5),
719 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 2),
720 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
721 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
722 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 1),
723 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
724 BPF_MOV64_IMM(BPF_REG_0, 0),
725 BPF_EXIT_INSN(),
726 },
727 .errstr_unpriv = "R1 pointer comparison",
728 .result_unpriv = REJECT,
729 .result = ACCEPT,
730 },
731 {
732 "jump test 3",
733 .insns = {
734 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
735 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
736 BPF_ST_MEM(BPF_DW, BPF_REG_2, -8, 0),
737 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
738 BPF_JMP_IMM(BPF_JA, 0, 0, 19),
739 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 3),
740 BPF_ST_MEM(BPF_DW, BPF_REG_2, -16, 0),
741 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
742 BPF_JMP_IMM(BPF_JA, 0, 0, 15),
743 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 2, 3),
744 BPF_ST_MEM(BPF_DW, BPF_REG_2, -32, 0),
745 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
746 BPF_JMP_IMM(BPF_JA, 0, 0, 11),
747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 3, 3),
748 BPF_ST_MEM(BPF_DW, BPF_REG_2, -40, 0),
749 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -40),
750 BPF_JMP_IMM(BPF_JA, 0, 0, 7),
751 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 4, 3),
752 BPF_ST_MEM(BPF_DW, BPF_REG_2, -48, 0),
753 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),
754 BPF_JMP_IMM(BPF_JA, 0, 0, 3),
755 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 5, 0),
756 BPF_ST_MEM(BPF_DW, BPF_REG_2, -56, 0),
757 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -56),
758 BPF_LD_MAP_FD(BPF_REG_1, 0),
759 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
760 BPF_FUNC_map_delete_elem),
761 BPF_EXIT_INSN(),
762 },
763 .fixup_map1 = { 24 },
764 .errstr_unpriv = "R1 pointer comparison",
765 .result_unpriv = REJECT,
766 .result = ACCEPT,
767 },
768 {
769 "jump test 4",
770 .insns = {
771 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
772 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
774 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
775 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
776 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
777 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
778 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
779 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
780 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
781 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
782 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
784 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
785 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
786 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
788 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
789 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
790 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
791 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
792 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
793 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
794 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
796 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
797 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
798 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
799 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
801 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
802 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
803 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 1),
804 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 2),
805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 3),
806 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 4),
807 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
808 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
810 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
811 BPF_MOV64_IMM(BPF_REG_0, 0),
812 BPF_EXIT_INSN(),
813 },
814 .errstr_unpriv = "R1 pointer comparison",
815 .result_unpriv = REJECT,
816 .result = ACCEPT,
817 },
818 {
819 "jump test 5",
820 .insns = {
821 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
822 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
823 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
824 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
825 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
826 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
827 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
828 BPF_MOV64_IMM(BPF_REG_0, 0),
829 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
830 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
831 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
832 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
833 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
834 BPF_MOV64_IMM(BPF_REG_0, 0),
835 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
836 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
837 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
838 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
839 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
840 BPF_MOV64_IMM(BPF_REG_0, 0),
841 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
842 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
843 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
844 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
845 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
846 BPF_MOV64_IMM(BPF_REG_0, 0),
847 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
848 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, -8),
849 BPF_JMP_IMM(BPF_JA, 0, 0, 2),
850 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_2, -8),
851 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
852 BPF_MOV64_IMM(BPF_REG_0, 0),
853 BPF_EXIT_INSN(),
854 },
855 .errstr_unpriv = "R1 pointer comparison",
856 .result_unpriv = REJECT,
857 .result = ACCEPT,
858 },
859 {
860 "access skb fields ok",
861 .insns = {
862 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
863 offsetof(struct __sk_buff, len)),
864 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
865 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
866 offsetof(struct __sk_buff, mark)),
867 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
868 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
869 offsetof(struct __sk_buff, pkt_type)),
870 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
871 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
872 offsetof(struct __sk_buff, queue_mapping)),
873 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
874 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
875 offsetof(struct __sk_buff, protocol)),
876 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
877 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
878 offsetof(struct __sk_buff, vlan_present)),
879 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
880 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
881 offsetof(struct __sk_buff, vlan_tci)),
882 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
883 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
884 offsetof(struct __sk_buff, napi_id)),
885 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
886 BPF_EXIT_INSN(),
887 },
888 .result = ACCEPT,
889 },
890 {
891 "access skb fields bad1",
892 .insns = {
893 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
894 BPF_EXIT_INSN(),
895 },
896 .errstr = "invalid bpf_context access",
897 .result = REJECT,
898 },
899 {
900 "access skb fields bad2",
901 .insns = {
902 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
903 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
904 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
905 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
906 BPF_LD_MAP_FD(BPF_REG_1, 0),
907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
908 BPF_FUNC_map_lookup_elem),
909 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
910 BPF_EXIT_INSN(),
911 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
912 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
913 offsetof(struct __sk_buff, pkt_type)),
914 BPF_EXIT_INSN(),
915 },
916 .fixup_map1 = { 4 },
917 .errstr = "different pointers",
918 .errstr_unpriv = "R1 pointer comparison",
919 .result = REJECT,
920 },
921 {
922 "access skb fields bad3",
923 .insns = {
924 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
925 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
926 offsetof(struct __sk_buff, pkt_type)),
927 BPF_EXIT_INSN(),
928 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
929 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
930 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
931 BPF_LD_MAP_FD(BPF_REG_1, 0),
932 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
933 BPF_FUNC_map_lookup_elem),
934 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
935 BPF_EXIT_INSN(),
936 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
937 BPF_JMP_IMM(BPF_JA, 0, 0, -12),
938 },
939 .fixup_map1 = { 6 },
940 .errstr = "different pointers",
941 .errstr_unpriv = "R1 pointer comparison",
942 .result = REJECT,
943 },
944 {
945 "access skb fields bad4",
946 .insns = {
947 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
948 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
949 offsetof(struct __sk_buff, len)),
950 BPF_MOV64_IMM(BPF_REG_0, 0),
951 BPF_EXIT_INSN(),
952 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
953 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
954 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
955 BPF_LD_MAP_FD(BPF_REG_1, 0),
956 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
957 BPF_FUNC_map_lookup_elem),
958 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
959 BPF_EXIT_INSN(),
960 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
961 BPF_JMP_IMM(BPF_JA, 0, 0, -13),
962 },
963 .fixup_map1 = { 7 },
964 .errstr = "different pointers",
965 .errstr_unpriv = "R1 pointer comparison",
966 .result = REJECT,
967 },
968 {
969 "invalid access __sk_buff family",
970 .insns = {
971 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
972 offsetof(struct __sk_buff, family)),
973 BPF_EXIT_INSN(),
974 },
975 .errstr = "invalid bpf_context access",
976 .result = REJECT,
977 },
978 {
979 "invalid access __sk_buff remote_ip4",
980 .insns = {
981 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
982 offsetof(struct __sk_buff, remote_ip4)),
983 BPF_EXIT_INSN(),
984 },
985 .errstr = "invalid bpf_context access",
986 .result = REJECT,
987 },
988 {
989 "invalid access __sk_buff local_ip4",
990 .insns = {
991 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
992 offsetof(struct __sk_buff, local_ip4)),
993 BPF_EXIT_INSN(),
994 },
995 .errstr = "invalid bpf_context access",
996 .result = REJECT,
997 },
998 {
999 "invalid access __sk_buff remote_ip6",
1000 .insns = {
1001 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1002 offsetof(struct __sk_buff, remote_ip6)),
1003 BPF_EXIT_INSN(),
1004 },
1005 .errstr = "invalid bpf_context access",
1006 .result = REJECT,
1007 },
1008 {
1009 "invalid access __sk_buff local_ip6",
1010 .insns = {
1011 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1012 offsetof(struct __sk_buff, local_ip6)),
1013 BPF_EXIT_INSN(),
1014 },
1015 .errstr = "invalid bpf_context access",
1016 .result = REJECT,
1017 },
1018 {
1019 "invalid access __sk_buff remote_port",
1020 .insns = {
1021 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1022 offsetof(struct __sk_buff, remote_port)),
1023 BPF_EXIT_INSN(),
1024 },
1025 .errstr = "invalid bpf_context access",
1026 .result = REJECT,
1027 },
1028 {
1029 "invalid access __sk_buff remote_port",
1030 .insns = {
1031 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1032 offsetof(struct __sk_buff, local_port)),
1033 BPF_EXIT_INSN(),
1034 },
1035 .errstr = "invalid bpf_context access",
1036 .result = REJECT,
1037 },
1038 {
1039 "valid access __sk_buff family",
1040 .insns = {
1041 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1042 offsetof(struct __sk_buff, family)),
1043 BPF_EXIT_INSN(),
1044 },
1045 .result = ACCEPT,
1046 .prog_type = BPF_PROG_TYPE_SK_SKB,
1047 },
1048 {
1049 "valid access __sk_buff remote_ip4",
1050 .insns = {
1051 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1052 offsetof(struct __sk_buff, remote_ip4)),
1053 BPF_EXIT_INSN(),
1054 },
1055 .result = ACCEPT,
1056 .prog_type = BPF_PROG_TYPE_SK_SKB,
1057 },
1058 {
1059 "valid access __sk_buff local_ip4",
1060 .insns = {
1061 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1062 offsetof(struct __sk_buff, local_ip4)),
1063 BPF_EXIT_INSN(),
1064 },
1065 .result = ACCEPT,
1066 .prog_type = BPF_PROG_TYPE_SK_SKB,
1067 },
1068 {
1069 "valid access __sk_buff remote_ip6",
1070 .insns = {
1071 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1072 offsetof(struct __sk_buff, remote_ip6[0])),
1073 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1074 offsetof(struct __sk_buff, remote_ip6[1])),
1075 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1076 offsetof(struct __sk_buff, remote_ip6[2])),
1077 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1078 offsetof(struct __sk_buff, remote_ip6[3])),
1079 BPF_EXIT_INSN(),
1080 },
1081 .result = ACCEPT,
1082 .prog_type = BPF_PROG_TYPE_SK_SKB,
1083 },
1084 {
1085 "valid access __sk_buff local_ip6",
1086 .insns = {
1087 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1088 offsetof(struct __sk_buff, local_ip6[0])),
1089 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1090 offsetof(struct __sk_buff, local_ip6[1])),
1091 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1092 offsetof(struct __sk_buff, local_ip6[2])),
1093 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1094 offsetof(struct __sk_buff, local_ip6[3])),
1095 BPF_EXIT_INSN(),
1096 },
1097 .result = ACCEPT,
1098 .prog_type = BPF_PROG_TYPE_SK_SKB,
1099 },
1100 {
1101 "valid access __sk_buff remote_port",
1102 .insns = {
1103 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1104 offsetof(struct __sk_buff, remote_port)),
1105 BPF_EXIT_INSN(),
1106 },
1107 .result = ACCEPT,
1108 .prog_type = BPF_PROG_TYPE_SK_SKB,
1109 },
1110 {
1111 "valid access __sk_buff remote_port",
1112 .insns = {
1113 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1114 offsetof(struct __sk_buff, local_port)),
1115 BPF_EXIT_INSN(),
1116 },
1117 .result = ACCEPT,
1118 .prog_type = BPF_PROG_TYPE_SK_SKB,
1119 },
1120 {
1121 "invalid access of tc_classid for SK_SKB",
1122 .insns = {
1123 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1124 offsetof(struct __sk_buff, tc_classid)),
1125 BPF_EXIT_INSN(),
1126 },
1127 .result = REJECT,
1128 .prog_type = BPF_PROG_TYPE_SK_SKB,
1129 .errstr = "invalid bpf_context access",
1130 },
1131 {
1132 "invalid access of skb->mark for SK_SKB",
1133 .insns = {
1134 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1135 offsetof(struct __sk_buff, mark)),
1136 BPF_EXIT_INSN(),
1137 },
1138 .result = REJECT,
1139 .prog_type = BPF_PROG_TYPE_SK_SKB,
1140 .errstr = "invalid bpf_context access",
1141 },
1142 {
1143 "check skb->mark is not writeable by SK_SKB",
1144 .insns = {
1145 BPF_MOV64_IMM(BPF_REG_0, 0),
1146 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1147 offsetof(struct __sk_buff, mark)),
1148 BPF_EXIT_INSN(),
1149 },
1150 .result = REJECT,
1151 .prog_type = BPF_PROG_TYPE_SK_SKB,
1152 .errstr = "invalid bpf_context access",
1153 },
1154 {
1155 "check skb->tc_index is writeable by SK_SKB",
1156 .insns = {
1157 BPF_MOV64_IMM(BPF_REG_0, 0),
1158 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1159 offsetof(struct __sk_buff, tc_index)),
1160 BPF_EXIT_INSN(),
1161 },
1162 .result = ACCEPT,
1163 .prog_type = BPF_PROG_TYPE_SK_SKB,
1164 },
1165 {
1166 "check skb->priority is writeable by SK_SKB",
1167 .insns = {
1168 BPF_MOV64_IMM(BPF_REG_0, 0),
1169 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1170 offsetof(struct __sk_buff, priority)),
1171 BPF_EXIT_INSN(),
1172 },
1173 .result = ACCEPT,
1174 .prog_type = BPF_PROG_TYPE_SK_SKB,
1175 },
1176 {
1177 "direct packet read for SK_SKB",
1178 .insns = {
1179 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1180 offsetof(struct __sk_buff, data)),
1181 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1182 offsetof(struct __sk_buff, data_end)),
1183 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1184 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1185 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1186 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
1187 BPF_MOV64_IMM(BPF_REG_0, 0),
1188 BPF_EXIT_INSN(),
1189 },
1190 .result = ACCEPT,
1191 .prog_type = BPF_PROG_TYPE_SK_SKB,
1192 },
1193 {
1194 "direct packet write for SK_SKB",
1195 .insns = {
1196 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1197 offsetof(struct __sk_buff, data)),
1198 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1199 offsetof(struct __sk_buff, data_end)),
1200 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1201 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1202 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
1203 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
1204 BPF_MOV64_IMM(BPF_REG_0, 0),
1205 BPF_EXIT_INSN(),
1206 },
1207 .result = ACCEPT,
1208 .prog_type = BPF_PROG_TYPE_SK_SKB,
1209 },
1210 {
1211 "overlapping checks for direct packet access SK_SKB",
1212 .insns = {
1213 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
1214 offsetof(struct __sk_buff, data)),
1215 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
1216 offsetof(struct __sk_buff, data_end)),
1217 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
1218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
1219 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
1220 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
1221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
1222 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
1223 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
1224 BPF_MOV64_IMM(BPF_REG_0, 0),
1225 BPF_EXIT_INSN(),
1226 },
1227 .result = ACCEPT,
1228 .prog_type = BPF_PROG_TYPE_SK_SKB,
1229 },
1230 {
1231 "check skb->mark is not writeable by sockets",
1232 .insns = {
1233 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1234 offsetof(struct __sk_buff, mark)),
1235 BPF_EXIT_INSN(),
1236 },
1237 .errstr = "invalid bpf_context access",
1238 .errstr_unpriv = "R1 leaks addr",
1239 .result = REJECT,
1240 },
1241 {
1242 "check skb->tc_index is not writeable by sockets",
1243 .insns = {
1244 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1245 offsetof(struct __sk_buff, tc_index)),
1246 BPF_EXIT_INSN(),
1247 },
1248 .errstr = "invalid bpf_context access",
1249 .errstr_unpriv = "R1 leaks addr",
1250 .result = REJECT,
1251 },
1252 {
1253 "check cb access: byte",
1254 .insns = {
1255 BPF_MOV64_IMM(BPF_REG_0, 0),
1256 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1257 offsetof(struct __sk_buff, cb[0])),
1258 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1259 offsetof(struct __sk_buff, cb[0]) + 1),
1260 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1261 offsetof(struct __sk_buff, cb[0]) + 2),
1262 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1263 offsetof(struct __sk_buff, cb[0]) + 3),
1264 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1265 offsetof(struct __sk_buff, cb[1])),
1266 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1267 offsetof(struct __sk_buff, cb[1]) + 1),
1268 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1269 offsetof(struct __sk_buff, cb[1]) + 2),
1270 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1271 offsetof(struct __sk_buff, cb[1]) + 3),
1272 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1273 offsetof(struct __sk_buff, cb[2])),
1274 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1275 offsetof(struct __sk_buff, cb[2]) + 1),
1276 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1277 offsetof(struct __sk_buff, cb[2]) + 2),
1278 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1279 offsetof(struct __sk_buff, cb[2]) + 3),
1280 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1281 offsetof(struct __sk_buff, cb[3])),
1282 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1283 offsetof(struct __sk_buff, cb[3]) + 1),
1284 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1285 offsetof(struct __sk_buff, cb[3]) + 2),
1286 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1287 offsetof(struct __sk_buff, cb[3]) + 3),
1288 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1289 offsetof(struct __sk_buff, cb[4])),
1290 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1291 offsetof(struct __sk_buff, cb[4]) + 1),
1292 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1293 offsetof(struct __sk_buff, cb[4]) + 2),
1294 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1295 offsetof(struct __sk_buff, cb[4]) + 3),
1296 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1297 offsetof(struct __sk_buff, cb[0])),
1298 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1299 offsetof(struct __sk_buff, cb[0]) + 1),
1300 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1301 offsetof(struct __sk_buff, cb[0]) + 2),
1302 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1303 offsetof(struct __sk_buff, cb[0]) + 3),
1304 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1305 offsetof(struct __sk_buff, cb[1])),
1306 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1307 offsetof(struct __sk_buff, cb[1]) + 1),
1308 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1309 offsetof(struct __sk_buff, cb[1]) + 2),
1310 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1311 offsetof(struct __sk_buff, cb[1]) + 3),
1312 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1313 offsetof(struct __sk_buff, cb[2])),
1314 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1315 offsetof(struct __sk_buff, cb[2]) + 1),
1316 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1317 offsetof(struct __sk_buff, cb[2]) + 2),
1318 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1319 offsetof(struct __sk_buff, cb[2]) + 3),
1320 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1321 offsetof(struct __sk_buff, cb[3])),
1322 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1323 offsetof(struct __sk_buff, cb[3]) + 1),
1324 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1325 offsetof(struct __sk_buff, cb[3]) + 2),
1326 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1327 offsetof(struct __sk_buff, cb[3]) + 3),
1328 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1329 offsetof(struct __sk_buff, cb[4])),
1330 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1331 offsetof(struct __sk_buff, cb[4]) + 1),
1332 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1333 offsetof(struct __sk_buff, cb[4]) + 2),
1334 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1335 offsetof(struct __sk_buff, cb[4]) + 3),
1336 BPF_EXIT_INSN(),
1337 },
1338 .result = ACCEPT,
1339 },
1340 {
1341 "__sk_buff->hash, offset 0, byte store not permitted",
1342 .insns = {
1343 BPF_MOV64_IMM(BPF_REG_0, 0),
1344 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1345 offsetof(struct __sk_buff, hash)),
1346 BPF_EXIT_INSN(),
1347 },
1348 .errstr = "invalid bpf_context access",
1349 .result = REJECT,
1350 },
1351 {
1352 "__sk_buff->tc_index, offset 3, byte store not permitted",
1353 .insns = {
1354 BPF_MOV64_IMM(BPF_REG_0, 0),
1355 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1356 offsetof(struct __sk_buff, tc_index) + 3),
1357 BPF_EXIT_INSN(),
1358 },
1359 .errstr = "invalid bpf_context access",
1360 .result = REJECT,
1361 },
1362 {
1363 "check skb->hash byte load permitted",
1364 .insns = {
1365 BPF_MOV64_IMM(BPF_REG_0, 0),
1366 #if __BYTE_ORDER == __LITTLE_ENDIAN
1367 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1368 offsetof(struct __sk_buff, hash)),
1369 #else
1370 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1371 offsetof(struct __sk_buff, hash) + 3),
1372 #endif
1373 BPF_EXIT_INSN(),
1374 },
1375 .result = ACCEPT,
1376 },
1377 {
1378 "check skb->hash byte load not permitted 1",
1379 .insns = {
1380 BPF_MOV64_IMM(BPF_REG_0, 0),
1381 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1382 offsetof(struct __sk_buff, hash) + 1),
1383 BPF_EXIT_INSN(),
1384 },
1385 .errstr = "invalid bpf_context access",
1386 .result = REJECT,
1387 },
1388 {
1389 "check skb->hash byte load not permitted 2",
1390 .insns = {
1391 BPF_MOV64_IMM(BPF_REG_0, 0),
1392 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1393 offsetof(struct __sk_buff, hash) + 2),
1394 BPF_EXIT_INSN(),
1395 },
1396 .errstr = "invalid bpf_context access",
1397 .result = REJECT,
1398 },
1399 {
1400 "check skb->hash byte load not permitted 3",
1401 .insns = {
1402 BPF_MOV64_IMM(BPF_REG_0, 0),
1403 #if __BYTE_ORDER == __LITTLE_ENDIAN
1404 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1405 offsetof(struct __sk_buff, hash) + 3),
1406 #else
1407 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
1408 offsetof(struct __sk_buff, hash)),
1409 #endif
1410 BPF_EXIT_INSN(),
1411 },
1412 .errstr = "invalid bpf_context access",
1413 .result = REJECT,
1414 },
1415 {
1416 "check cb access: byte, wrong type",
1417 .insns = {
1418 BPF_MOV64_IMM(BPF_REG_0, 0),
1419 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
1420 offsetof(struct __sk_buff, cb[0])),
1421 BPF_EXIT_INSN(),
1422 },
1423 .errstr = "invalid bpf_context access",
1424 .result = REJECT,
1425 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1426 },
1427 {
1428 "check cb access: half",
1429 .insns = {
1430 BPF_MOV64_IMM(BPF_REG_0, 0),
1431 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1432 offsetof(struct __sk_buff, cb[0])),
1433 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1434 offsetof(struct __sk_buff, cb[0]) + 2),
1435 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1436 offsetof(struct __sk_buff, cb[1])),
1437 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1438 offsetof(struct __sk_buff, cb[1]) + 2),
1439 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1440 offsetof(struct __sk_buff, cb[2])),
1441 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1442 offsetof(struct __sk_buff, cb[2]) + 2),
1443 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1444 offsetof(struct __sk_buff, cb[3])),
1445 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1446 offsetof(struct __sk_buff, cb[3]) + 2),
1447 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1448 offsetof(struct __sk_buff, cb[4])),
1449 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1450 offsetof(struct __sk_buff, cb[4]) + 2),
1451 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1452 offsetof(struct __sk_buff, cb[0])),
1453 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1454 offsetof(struct __sk_buff, cb[0]) + 2),
1455 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1456 offsetof(struct __sk_buff, cb[1])),
1457 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1458 offsetof(struct __sk_buff, cb[1]) + 2),
1459 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1460 offsetof(struct __sk_buff, cb[2])),
1461 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1462 offsetof(struct __sk_buff, cb[2]) + 2),
1463 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1464 offsetof(struct __sk_buff, cb[3])),
1465 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1466 offsetof(struct __sk_buff, cb[3]) + 2),
1467 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1468 offsetof(struct __sk_buff, cb[4])),
1469 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1470 offsetof(struct __sk_buff, cb[4]) + 2),
1471 BPF_EXIT_INSN(),
1472 },
1473 .result = ACCEPT,
1474 },
1475 {
1476 "check cb access: half, unaligned",
1477 .insns = {
1478 BPF_MOV64_IMM(BPF_REG_0, 0),
1479 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1480 offsetof(struct __sk_buff, cb[0]) + 1),
1481 BPF_EXIT_INSN(),
1482 },
1483 .errstr = "misaligned context access",
1484 .result = REJECT,
1485 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1486 },
1487 {
1488 "check __sk_buff->hash, offset 0, half store not permitted",
1489 .insns = {
1490 BPF_MOV64_IMM(BPF_REG_0, 0),
1491 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1492 offsetof(struct __sk_buff, hash)),
1493 BPF_EXIT_INSN(),
1494 },
1495 .errstr = "invalid bpf_context access",
1496 .result = REJECT,
1497 },
1498 {
1499 "check __sk_buff->tc_index, offset 2, half store not permitted",
1500 .insns = {
1501 BPF_MOV64_IMM(BPF_REG_0, 0),
1502 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1503 offsetof(struct __sk_buff, tc_index) + 2),
1504 BPF_EXIT_INSN(),
1505 },
1506 .errstr = "invalid bpf_context access",
1507 .result = REJECT,
1508 },
1509 {
1510 "check skb->hash half load permitted",
1511 .insns = {
1512 BPF_MOV64_IMM(BPF_REG_0, 0),
1513 #if __BYTE_ORDER == __LITTLE_ENDIAN
1514 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1515 offsetof(struct __sk_buff, hash)),
1516 #else
1517 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1518 offsetof(struct __sk_buff, hash) + 2),
1519 #endif
1520 BPF_EXIT_INSN(),
1521 },
1522 .result = ACCEPT,
1523 },
1524 {
1525 "check skb->hash half load not permitted",
1526 .insns = {
1527 BPF_MOV64_IMM(BPF_REG_0, 0),
1528 #if __BYTE_ORDER == __LITTLE_ENDIAN
1529 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1530 offsetof(struct __sk_buff, hash) + 2),
1531 #else
1532 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
1533 offsetof(struct __sk_buff, hash)),
1534 #endif
1535 BPF_EXIT_INSN(),
1536 },
1537 .errstr = "invalid bpf_context access",
1538 .result = REJECT,
1539 },
1540 {
1541 "check cb access: half, wrong type",
1542 .insns = {
1543 BPF_MOV64_IMM(BPF_REG_0, 0),
1544 BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
1545 offsetof(struct __sk_buff, cb[0])),
1546 BPF_EXIT_INSN(),
1547 },
1548 .errstr = "invalid bpf_context access",
1549 .result = REJECT,
1550 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1551 },
1552 {
1553 "check cb access: word",
1554 .insns = {
1555 BPF_MOV64_IMM(BPF_REG_0, 0),
1556 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1557 offsetof(struct __sk_buff, cb[0])),
1558 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1559 offsetof(struct __sk_buff, cb[1])),
1560 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1561 offsetof(struct __sk_buff, cb[2])),
1562 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1563 offsetof(struct __sk_buff, cb[3])),
1564 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1565 offsetof(struct __sk_buff, cb[4])),
1566 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1567 offsetof(struct __sk_buff, cb[0])),
1568 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1569 offsetof(struct __sk_buff, cb[1])),
1570 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1571 offsetof(struct __sk_buff, cb[2])),
1572 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1573 offsetof(struct __sk_buff, cb[3])),
1574 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1575 offsetof(struct __sk_buff, cb[4])),
1576 BPF_EXIT_INSN(),
1577 },
1578 .result = ACCEPT,
1579 },
1580 {
1581 "check cb access: word, unaligned 1",
1582 .insns = {
1583 BPF_MOV64_IMM(BPF_REG_0, 0),
1584 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1585 offsetof(struct __sk_buff, cb[0]) + 2),
1586 BPF_EXIT_INSN(),
1587 },
1588 .errstr = "misaligned context access",
1589 .result = REJECT,
1590 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1591 },
1592 {
1593 "check cb access: word, unaligned 2",
1594 .insns = {
1595 BPF_MOV64_IMM(BPF_REG_0, 0),
1596 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1597 offsetof(struct __sk_buff, cb[4]) + 1),
1598 BPF_EXIT_INSN(),
1599 },
1600 .errstr = "misaligned context access",
1601 .result = REJECT,
1602 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1603 },
1604 {
1605 "check cb access: word, unaligned 3",
1606 .insns = {
1607 BPF_MOV64_IMM(BPF_REG_0, 0),
1608 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1609 offsetof(struct __sk_buff, cb[4]) + 2),
1610 BPF_EXIT_INSN(),
1611 },
1612 .errstr = "misaligned context access",
1613 .result = REJECT,
1614 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1615 },
1616 {
1617 "check cb access: word, unaligned 4",
1618 .insns = {
1619 BPF_MOV64_IMM(BPF_REG_0, 0),
1620 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1621 offsetof(struct __sk_buff, cb[4]) + 3),
1622 BPF_EXIT_INSN(),
1623 },
1624 .errstr = "misaligned context access",
1625 .result = REJECT,
1626 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1627 },
1628 {
1629 "check cb access: double",
1630 .insns = {
1631 BPF_MOV64_IMM(BPF_REG_0, 0),
1632 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1633 offsetof(struct __sk_buff, cb[0])),
1634 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1635 offsetof(struct __sk_buff, cb[2])),
1636 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1637 offsetof(struct __sk_buff, cb[0])),
1638 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1639 offsetof(struct __sk_buff, cb[2])),
1640 BPF_EXIT_INSN(),
1641 },
1642 .result = ACCEPT,
1643 },
1644 {
1645 "check cb access: double, unaligned 1",
1646 .insns = {
1647 BPF_MOV64_IMM(BPF_REG_0, 0),
1648 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1649 offsetof(struct __sk_buff, cb[1])),
1650 BPF_EXIT_INSN(),
1651 },
1652 .errstr = "misaligned context access",
1653 .result = REJECT,
1654 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1655 },
1656 {
1657 "check cb access: double, unaligned 2",
1658 .insns = {
1659 BPF_MOV64_IMM(BPF_REG_0, 0),
1660 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1661 offsetof(struct __sk_buff, cb[3])),
1662 BPF_EXIT_INSN(),
1663 },
1664 .errstr = "misaligned context access",
1665 .result = REJECT,
1666 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1667 },
1668 {
1669 "check cb access: double, oob 1",
1670 .insns = {
1671 BPF_MOV64_IMM(BPF_REG_0, 0),
1672 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1673 offsetof(struct __sk_buff, cb[4])),
1674 BPF_EXIT_INSN(),
1675 },
1676 .errstr = "invalid bpf_context access",
1677 .result = REJECT,
1678 },
1679 {
1680 "check cb access: double, oob 2",
1681 .insns = {
1682 BPF_MOV64_IMM(BPF_REG_0, 0),
1683 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1684 offsetof(struct __sk_buff, cb[4])),
1685 BPF_EXIT_INSN(),
1686 },
1687 .errstr = "invalid bpf_context access",
1688 .result = REJECT,
1689 },
1690 {
1691 "check __sk_buff->ifindex dw store not permitted",
1692 .insns = {
1693 BPF_MOV64_IMM(BPF_REG_0, 0),
1694 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1695 offsetof(struct __sk_buff, ifindex)),
1696 BPF_EXIT_INSN(),
1697 },
1698 .errstr = "invalid bpf_context access",
1699 .result = REJECT,
1700 },
1701 {
1702 "check __sk_buff->ifindex dw load not permitted",
1703 .insns = {
1704 BPF_MOV64_IMM(BPF_REG_0, 0),
1705 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
1706 offsetof(struct __sk_buff, ifindex)),
1707 BPF_EXIT_INSN(),
1708 },
1709 .errstr = "invalid bpf_context access",
1710 .result = REJECT,
1711 },
1712 {
1713 "check cb access: double, wrong type",
1714 .insns = {
1715 BPF_MOV64_IMM(BPF_REG_0, 0),
1716 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
1717 offsetof(struct __sk_buff, cb[0])),
1718 BPF_EXIT_INSN(),
1719 },
1720 .errstr = "invalid bpf_context access",
1721 .result = REJECT,
1722 .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
1723 },
1724 {
1725 "check out of range skb->cb access",
1726 .insns = {
1727 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1728 offsetof(struct __sk_buff, cb[0]) + 256),
1729 BPF_EXIT_INSN(),
1730 },
1731 .errstr = "invalid bpf_context access",
1732 .errstr_unpriv = "",
1733 .result = REJECT,
1734 .prog_type = BPF_PROG_TYPE_SCHED_ACT,
1735 },
1736 {
1737 "write skb fields from socket prog",
1738 .insns = {
1739 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1740 offsetof(struct __sk_buff, cb[4])),
1741 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1742 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1743 offsetof(struct __sk_buff, mark)),
1744 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1745 offsetof(struct __sk_buff, tc_index)),
1746 BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
1747 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1748 offsetof(struct __sk_buff, cb[0])),
1749 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
1750 offsetof(struct __sk_buff, cb[2])),
1751 BPF_EXIT_INSN(),
1752 },
1753 .result = ACCEPT,
1754 .errstr_unpriv = "R1 leaks addr",
1755 .result_unpriv = REJECT,
1756 },
1757 {
1758 "write skb fields from tc_cls_act prog",
1759 .insns = {
1760 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1761 offsetof(struct __sk_buff, cb[0])),
1762 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1763 offsetof(struct __sk_buff, mark)),
1764 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
1765 offsetof(struct __sk_buff, tc_index)),
1766 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1767 offsetof(struct __sk_buff, tc_index)),
1768 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
1769 offsetof(struct __sk_buff, cb[3])),
1770 BPF_EXIT_INSN(),
1771 },
1772 .errstr_unpriv = "",
1773 .result_unpriv = REJECT,
1774 .result = ACCEPT,
1775 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
1776 },
1777 {
1778 "PTR_TO_STACK store/load",
1779 .insns = {
1780 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1781 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1782 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1783 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1784 BPF_EXIT_INSN(),
1785 },
1786 .result = ACCEPT,
1787 },
1788 {
1789 "PTR_TO_STACK store/load - bad alignment on off",
1790 .insns = {
1791 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1792 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1793 BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
1794 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
1795 BPF_EXIT_INSN(),
1796 },
1797 .result = REJECT,
1798 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1799 },
1800 {
1801 "PTR_TO_STACK store/load - bad alignment on reg",
1802 .insns = {
1803 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1804 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
1805 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1806 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1807 BPF_EXIT_INSN(),
1808 },
1809 .result = REJECT,
1810 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1811 },
1812 {
1813 "PTR_TO_STACK store/load - out of bounds low",
1814 .insns = {
1815 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1816 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
1817 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1818 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1819 BPF_EXIT_INSN(),
1820 },
1821 .result = REJECT,
1822 .errstr = "invalid stack off=-79992 size=8",
1823 },
1824 {
1825 "PTR_TO_STACK store/load - out of bounds high",
1826 .insns = {
1827 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1828 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1829 BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
1830 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
1831 BPF_EXIT_INSN(),
1832 },
1833 .result = REJECT,
1834 .errstr = "invalid stack off=0 size=8",
1835 },
1836 {
1837 "unpriv: return pointer",
1838 .insns = {
1839 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
1840 BPF_EXIT_INSN(),
1841 },
1842 .result = ACCEPT,
1843 .result_unpriv = REJECT,
1844 .errstr_unpriv = "R0 leaks addr",
1845 },
1846 {
1847 "unpriv: add const to pointer",
1848 .insns = {
1849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
1850 BPF_MOV64_IMM(BPF_REG_0, 0),
1851 BPF_EXIT_INSN(),
1852 },
1853 .result = ACCEPT,
1854 },
1855 {
1856 "unpriv: add pointer to pointer",
1857 .insns = {
1858 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
1859 BPF_MOV64_IMM(BPF_REG_0, 0),
1860 BPF_EXIT_INSN(),
1861 },
1862 .result = ACCEPT,
1863 .result_unpriv = REJECT,
1864 .errstr_unpriv = "R1 pointer += pointer",
1865 },
1866 {
1867 "unpriv: neg pointer",
1868 .insns = {
1869 BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
1870 BPF_MOV64_IMM(BPF_REG_0, 0),
1871 BPF_EXIT_INSN(),
1872 },
1873 .result = ACCEPT,
1874 .result_unpriv = REJECT,
1875 .errstr_unpriv = "R1 pointer arithmetic",
1876 },
1877 {
1878 "unpriv: cmp pointer with const",
1879 .insns = {
1880 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
1881 BPF_MOV64_IMM(BPF_REG_0, 0),
1882 BPF_EXIT_INSN(),
1883 },
1884 .result = ACCEPT,
1885 .result_unpriv = REJECT,
1886 .errstr_unpriv = "R1 pointer comparison",
1887 },
1888 {
1889 "unpriv: cmp pointer with pointer",
1890 .insns = {
1891 BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
1892 BPF_MOV64_IMM(BPF_REG_0, 0),
1893 BPF_EXIT_INSN(),
1894 },
1895 .result = ACCEPT,
1896 .result_unpriv = REJECT,
1897 .errstr_unpriv = "R10 pointer comparison",
1898 },
1899 {
1900 "unpriv: check that printk is disallowed",
1901 .insns = {
1902 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1903 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
1904 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
1905 BPF_MOV64_IMM(BPF_REG_2, 8),
1906 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
1907 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1908 BPF_FUNC_trace_printk),
1909 BPF_MOV64_IMM(BPF_REG_0, 0),
1910 BPF_EXIT_INSN(),
1911 },
1912 .errstr_unpriv = "unknown func bpf_trace_printk#6",
1913 .result_unpriv = REJECT,
1914 .result = ACCEPT,
1915 },
1916 {
1917 "unpriv: pass pointer to helper function",
1918 .insns = {
1919 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
1920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1922 BPF_LD_MAP_FD(BPF_REG_1, 0),
1923 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
1924 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
1925 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1926 BPF_FUNC_map_update_elem),
1927 BPF_MOV64_IMM(BPF_REG_0, 0),
1928 BPF_EXIT_INSN(),
1929 },
1930 .fixup_map1 = { 3 },
1931 .errstr_unpriv = "R4 leaks addr",
1932 .result_unpriv = REJECT,
1933 .result = ACCEPT,
1934 },
1935 {
1936 "unpriv: indirectly pass pointer on stack to helper function",
1937 .insns = {
1938 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1939 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
1940 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
1941 BPF_LD_MAP_FD(BPF_REG_1, 0),
1942 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
1943 BPF_FUNC_map_lookup_elem),
1944 BPF_MOV64_IMM(BPF_REG_0, 0),
1945 BPF_EXIT_INSN(),
1946 },
1947 .fixup_map1 = { 3 },
1948 .errstr = "invalid indirect read from stack off -8+0 size 8",
1949 .result = REJECT,
1950 },
1951 {
1952 "unpriv: mangle pointer on stack 1",
1953 .insns = {
1954 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1955 BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
1956 BPF_MOV64_IMM(BPF_REG_0, 0),
1957 BPF_EXIT_INSN(),
1958 },
1959 .errstr_unpriv = "attempt to corrupt spilled",
1960 .result_unpriv = REJECT,
1961 .result = ACCEPT,
1962 },
1963 {
1964 "unpriv: mangle pointer on stack 2",
1965 .insns = {
1966 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1967 BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
1968 BPF_MOV64_IMM(BPF_REG_0, 0),
1969 BPF_EXIT_INSN(),
1970 },
1971 .errstr_unpriv = "attempt to corrupt spilled",
1972 .result_unpriv = REJECT,
1973 .result = ACCEPT,
1974 },
1975 {
1976 "unpriv: read pointer from stack in small chunks",
1977 .insns = {
1978 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
1979 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
1980 BPF_MOV64_IMM(BPF_REG_0, 0),
1981 BPF_EXIT_INSN(),
1982 },
1983 .errstr = "invalid size",
1984 .result = REJECT,
1985 },
1986 {
1987 "unpriv: write pointer into ctx",
1988 .insns = {
1989 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
1990 BPF_MOV64_IMM(BPF_REG_0, 0),
1991 BPF_EXIT_INSN(),
1992 },
1993 .errstr_unpriv = "R1 leaks addr",
1994 .result_unpriv = REJECT,
1995 .errstr = "invalid bpf_context access",
1996 .result = REJECT,
1997 },
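	/* Spilling the ctx pointer to the stack and filling it back must
	 * preserve its type; once the slot has been overwritten with fp or
	 * touched by xadd, the refilled value can no longer be used where a
	 * ctx pointer is expected.
	 */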
1998 {
1999 "unpriv: spill/fill of ctx",
2000 .insns = {
2001 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2003 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2004 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2005 BPF_MOV64_IMM(BPF_REG_0, 0),
2006 BPF_EXIT_INSN(),
2007 },
2008 .result = ACCEPT,
2009 },
2010 {
2011 "unpriv: spill/fill of ctx 2",
2012 .insns = {
2013 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2015 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2016 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2017 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2018 BPF_FUNC_get_hash_recalc),
2019 BPF_EXIT_INSN(),
2020 },
2021 .result = ACCEPT,
2022 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2023 },
2024 {
2025 "unpriv: spill/fill of ctx 3",
2026 .insns = {
2027 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2028 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2029 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2030 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2031 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2033 BPF_FUNC_get_hash_recalc),
2034 BPF_EXIT_INSN(),
2035 },
2036 .result = REJECT,
2037 .errstr = "R1 type=fp expected=ctx",
2038 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2039 },
2040 {
2041 "unpriv: spill/fill of ctx 4",
2042 .insns = {
2043 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2044 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2045 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2046 BPF_MOV64_IMM(BPF_REG_0, 1),
2047 BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
2048 BPF_REG_0, -8, 0),
2049 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2050 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2051 BPF_FUNC_get_hash_recalc),
2052 BPF_EXIT_INSN(),
2053 },
2054 .result = REJECT,
2055 .errstr = "R1 type=inv expected=ctx",
2056 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2057 },
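	/* The same spill slot is written with either a stack pointer or the
	 * ctx/perf_event pointer depending on a runtime branch, so the single
	 * instruction that later dereferences the fill is rejected: one insn
	 * cannot be used with different pointer types.
	 */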
2058 {
2059 "unpriv: spill/fill of different pointers stx",
2060 .insns = {
2061 BPF_MOV64_IMM(BPF_REG_3, 42),
2062 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2063 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2064 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2065 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2066 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
2067 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2068 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2069 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2070 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2071 BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
2072 offsetof(struct __sk_buff, mark)),
2073 BPF_MOV64_IMM(BPF_REG_0, 0),
2074 BPF_EXIT_INSN(),
2075 },
2076 .result = REJECT,
2077 .errstr = "same insn cannot be used with different pointers",
2078 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2079 },
2080 {
2081 "unpriv: spill/fill of different pointers ldx",
2082 .insns = {
2083 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2084 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2085 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
2086 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2087 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
2088 -(__s32)offsetof(struct bpf_perf_event_data,
2089 sample_period) - 8),
2090 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
2091 BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
2092 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2093 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
2094 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
2095 offsetof(struct bpf_perf_event_data,
2096 sample_period)),
2097 BPF_MOV64_IMM(BPF_REG_0, 0),
2098 BPF_EXIT_INSN(),
2099 },
2100 .result = REJECT,
2101 .errstr = "same insn cannot be used with different pointers",
2102 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
2103 },
2104 {
2105 "unpriv: write pointer into map elem value",
2106 .insns = {
2107 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
2108 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2110 BPF_LD_MAP_FD(BPF_REG_1, 0),
2111 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2112 BPF_FUNC_map_lookup_elem),
2113 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
2114 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
2115 BPF_EXIT_INSN(),
2116 },
2117 .fixup_map1 = { 3 },
2118 .errstr_unpriv = "R0 leaks addr",
2119 .result_unpriv = REJECT,
2120 .result = ACCEPT,
2121 },
2122 {
2123 "unpriv: partial copy of pointer",
2124 .insns = {
2125 BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
2126 BPF_MOV64_IMM(BPF_REG_0, 0),
2127 BPF_EXIT_INSN(),
2128 },
2129 .errstr_unpriv = "R10 partial copy",
2130 .result_unpriv = REJECT,
2131 .result = ACCEPT,
2132 },
2133 {
2134 "unpriv: pass pointer to tail_call",
2135 .insns = {
2136 BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
2137 BPF_LD_MAP_FD(BPF_REG_2, 0),
2138 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2139 BPF_FUNC_tail_call),
2140 BPF_MOV64_IMM(BPF_REG_0, 0),
2141 BPF_EXIT_INSN(),
2142 },
2143 .fixup_prog = { 1 },
2144 .errstr_unpriv = "R3 leaks addr into helper",
2145 .result_unpriv = REJECT,
2146 .result = ACCEPT,
2147 },
2148 {
2149 "unpriv: cmp map pointer with zero",
2150 .insns = {
2151 BPF_MOV64_IMM(BPF_REG_1, 0),
2152 BPF_LD_MAP_FD(BPF_REG_1, 0),
2153 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
2154 BPF_MOV64_IMM(BPF_REG_0, 0),
2155 BPF_EXIT_INSN(),
2156 },
2157 .fixup_map1 = { 1 },
2158 .errstr_unpriv = "R1 pointer comparison",
2159 .result_unpriv = REJECT,
2160 .result = ACCEPT,
2161 },
2162 {
2163 "unpriv: write into frame pointer",
2164 .insns = {
2165 BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
2166 BPF_MOV64_IMM(BPF_REG_0, 0),
2167 BPF_EXIT_INSN(),
2168 },
2169 .errstr = "frame pointer is read only",
2170 .result = REJECT,
2171 },
2172 {
2173 "unpriv: spill/fill frame pointer",
2174 .insns = {
2175 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2176 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2177 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
2178 BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
2179 BPF_MOV64_IMM(BPF_REG_0, 0),
2180 BPF_EXIT_INSN(),
2181 },
2182 .errstr = "frame pointer is read only",
2183 .result = REJECT,
2184 },
2185 {
2186 "unpriv: cmp of frame pointer",
2187 .insns = {
2188 BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
2189 BPF_MOV64_IMM(BPF_REG_0, 0),
2190 BPF_EXIT_INSN(),
2191 },
2192 .errstr_unpriv = "R10 pointer comparison",
2193 .result_unpriv = REJECT,
2194 .result = ACCEPT,
2195 },
2196 {
2197 "unpriv: adding of fp",
2198 .insns = {
2199 BPF_MOV64_IMM(BPF_REG_0, 0),
2200 BPF_MOV64_IMM(BPF_REG_1, 0),
2201 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
2202 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
2203 BPF_EXIT_INSN(),
2204 },
2205 .result = ACCEPT,
2206 },
2207 {
2208 "unpriv: cmp of stack pointer",
2209 .insns = {
2210 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
2211 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
2212 BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
2213 BPF_MOV64_IMM(BPF_REG_0, 0),
2214 BPF_EXIT_INSN(),
2215 },
2216 .errstr_unpriv = "R2 pointer comparison",
2217 .result_unpriv = REJECT,
2218 .result = ACCEPT,
2219 },
2220 {
2221 "stack pointer arithmetic",
2222 .insns = {
2223 BPF_MOV64_IMM(BPF_REG_1, 4),
2224 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
2225 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
2226 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10),
2228 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2229 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1),
2230 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2231 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
2232 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2233 BPF_ST_MEM(0, BPF_REG_2, 4, 0),
2234 BPF_MOV64_IMM(BPF_REG_0, 0),
2235 BPF_EXIT_INSN(),
2236 },
2237 .result = ACCEPT,
2238 },
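	/* "raw_stack:" tests: passing a stack buffer to bpf_skb_load_bytes().
	 * The buffer does not need to be initialized, but the pointer and
	 * length must stay within the 512-byte stack and the length must be
	 * a positive, bounded value.
	 */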
2239 {
2240 "raw_stack: no skb_load_bytes",
2241 .insns = {
2242 BPF_MOV64_IMM(BPF_REG_2, 4),
2243 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2245 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2246 BPF_MOV64_IMM(BPF_REG_4, 8),
2247 /* Call to skb_load_bytes() omitted. */
2248 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2249 BPF_EXIT_INSN(),
2250 },
2251 .result = REJECT,
2252 .errstr = "invalid read from stack off -8+0 size 8",
2253 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2254 },
2255 {
2256 "raw_stack: skb_load_bytes, negative len",
2257 .insns = {
2258 BPF_MOV64_IMM(BPF_REG_2, 4),
2259 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2260 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2261 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2262 BPF_MOV64_IMM(BPF_REG_4, -8),
2263 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2264 BPF_FUNC_skb_load_bytes),
2265 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2266 BPF_EXIT_INSN(),
2267 },
2268 .result = REJECT,
2269 .errstr = "R4 min value is negative",
2270 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2271 },
2272 {
2273 "raw_stack: skb_load_bytes, negative len 2",
2274 .insns = {
2275 BPF_MOV64_IMM(BPF_REG_2, 4),
2276 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2277 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2278 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2279 BPF_MOV64_IMM(BPF_REG_4, ~0),
2280 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2281 BPF_FUNC_skb_load_bytes),
2282 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2283 BPF_EXIT_INSN(),
2284 },
2285 .result = REJECT,
2286 .errstr = "R4 min value is negative",
2287 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2288 },
2289 {
2290 "raw_stack: skb_load_bytes, zero len",
2291 .insns = {
2292 BPF_MOV64_IMM(BPF_REG_2, 4),
2293 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2295 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2296 BPF_MOV64_IMM(BPF_REG_4, 0),
2297 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2298 BPF_FUNC_skb_load_bytes),
2299 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2300 BPF_EXIT_INSN(),
2301 },
2302 .result = REJECT,
2303 .errstr = "invalid stack type R3",
2304 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2305 },
2306 {
2307 "raw_stack: skb_load_bytes, no init",
2308 .insns = {
2309 BPF_MOV64_IMM(BPF_REG_2, 4),
2310 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2311 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2312 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2313 BPF_MOV64_IMM(BPF_REG_4, 8),
2314 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2315 BPF_FUNC_skb_load_bytes),
2316 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2317 BPF_EXIT_INSN(),
2318 },
2319 .result = ACCEPT,
2320 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2321 },
2322 {
2323 "raw_stack: skb_load_bytes, init",
2324 .insns = {
2325 BPF_MOV64_IMM(BPF_REG_2, 4),
2326 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2327 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2328 BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
2329 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2330 BPF_MOV64_IMM(BPF_REG_4, 8),
2331 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2332 BPF_FUNC_skb_load_bytes),
2333 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2334 BPF_EXIT_INSN(),
2335 },
2336 .result = ACCEPT,
2337 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2338 },
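	/* Spilled pointers outside the range written by the helper remain
	 * usable afterwards, while any spill slot covered by the helper's
	 * write is clobbered and reads back as an unknown scalar ('inv').
	 */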
2339 {
2340 "raw_stack: skb_load_bytes, spilled regs around bounds",
2341 .insns = {
2342 BPF_MOV64_IMM(BPF_REG_2, 4),
2343 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2344 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2345 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2346 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2347 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2348 BPF_MOV64_IMM(BPF_REG_4, 8),
2349 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2350 BPF_FUNC_skb_load_bytes),
2351 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2352 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2353 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2354 offsetof(struct __sk_buff, mark)),
2355 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2356 offsetof(struct __sk_buff, priority)),
2357 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2358 BPF_EXIT_INSN(),
2359 },
2360 .result = ACCEPT,
2361 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2362 },
2363 {
2364 "raw_stack: skb_load_bytes, spilled regs corruption",
2365 .insns = {
2366 BPF_MOV64_IMM(BPF_REG_2, 4),
2367 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
2369 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2370 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2371 BPF_MOV64_IMM(BPF_REG_4, 8),
2372 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2373 BPF_FUNC_skb_load_bytes),
2374 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2375 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2376 offsetof(struct __sk_buff, mark)),
2377 BPF_EXIT_INSN(),
2378 },
2379 .result = REJECT,
2380 .errstr = "R0 invalid mem access 'inv'",
2381 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2382 },
2383 {
2384 "raw_stack: skb_load_bytes, spilled regs corruption 2",
2385 .insns = {
2386 BPF_MOV64_IMM(BPF_REG_2, 4),
2387 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2388 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2389 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2390 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2391 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2392 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2393 BPF_MOV64_IMM(BPF_REG_4, 8),
2394 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2395 BPF_FUNC_skb_load_bytes),
2396 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2397 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2398 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2399 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2400 offsetof(struct __sk_buff, mark)),
2401 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2402 offsetof(struct __sk_buff, priority)),
2403 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2404 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
2405 offsetof(struct __sk_buff, pkt_type)),
2406 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2407 BPF_EXIT_INSN(),
2408 },
2409 .result = REJECT,
2410 .errstr = "R3 invalid mem access 'inv'",
2411 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2412 },
2413 {
2414 "raw_stack: skb_load_bytes, spilled regs + data",
2415 .insns = {
2416 BPF_MOV64_IMM(BPF_REG_2, 4),
2417 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2418 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
2419 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
2420 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
2421 BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
2422 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2423 BPF_MOV64_IMM(BPF_REG_4, 8),
2424 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2425 BPF_FUNC_skb_load_bytes),
2426 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
2427 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8),
2428 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0),
2429 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
2430 offsetof(struct __sk_buff, mark)),
2431 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
2432 offsetof(struct __sk_buff, priority)),
2433 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2434 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
2435 BPF_EXIT_INSN(),
2436 },
2437 .result = ACCEPT,
2438 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2439 },
2440 {
2441 "raw_stack: skb_load_bytes, invalid access 1",
2442 .insns = {
2443 BPF_MOV64_IMM(BPF_REG_2, 4),
2444 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2445 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
2446 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2447 BPF_MOV64_IMM(BPF_REG_4, 8),
2448 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2449 BPF_FUNC_skb_load_bytes),
2450 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2451 BPF_EXIT_INSN(),
2452 },
2453 .result = REJECT,
2454 .errstr = "invalid stack type R3 off=-513 access_size=8",
2455 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2456 },
2457 {
2458 "raw_stack: skb_load_bytes, invalid access 2",
2459 .insns = {
2460 BPF_MOV64_IMM(BPF_REG_2, 4),
2461 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2462 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2463 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2464 BPF_MOV64_IMM(BPF_REG_4, 8),
2465 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2466 BPF_FUNC_skb_load_bytes),
2467 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2468 BPF_EXIT_INSN(),
2469 },
2470 .result = REJECT,
2471 .errstr = "invalid stack type R3 off=-1 access_size=8",
2472 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2473 },
2474 {
2475 "raw_stack: skb_load_bytes, invalid access 3",
2476 .insns = {
2477 BPF_MOV64_IMM(BPF_REG_2, 4),
2478 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2479 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
2480 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2481 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2483 BPF_FUNC_skb_load_bytes),
2484 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2485 BPF_EXIT_INSN(),
2486 },
2487 .result = REJECT,
2488 .errstr = "R4 min value is negative",
2489 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2490 },
2491 {
2492 "raw_stack: skb_load_bytes, invalid access 4",
2493 .insns = {
2494 BPF_MOV64_IMM(BPF_REG_2, 4),
2495 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2496 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
2497 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2498 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2499 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2500 BPF_FUNC_skb_load_bytes),
2501 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2502 BPF_EXIT_INSN(),
2503 },
2504 .result = REJECT,
2505 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2506 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2507 },
2508 {
2509 "raw_stack: skb_load_bytes, invalid access 5",
2510 .insns = {
2511 BPF_MOV64_IMM(BPF_REG_2, 4),
2512 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2514 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2515 BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
2516 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2517 BPF_FUNC_skb_load_bytes),
2518 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2519 BPF_EXIT_INSN(),
2520 },
2521 .result = REJECT,
2522 .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'",
2523 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2524 },
2525 {
2526 "raw_stack: skb_load_bytes, invalid access 6",
2527 .insns = {
2528 BPF_MOV64_IMM(BPF_REG_2, 4),
2529 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2531 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2532 BPF_MOV64_IMM(BPF_REG_4, 0),
2533 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2534 BPF_FUNC_skb_load_bytes),
2535 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2536 BPF_EXIT_INSN(),
2537 },
2538 .result = REJECT,
2539 .errstr = "invalid stack type R3 off=-512 access_size=0",
2540 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2541 },
2542 {
2543 "raw_stack: skb_load_bytes, large access",
2544 .insns = {
2545 BPF_MOV64_IMM(BPF_REG_2, 4),
2546 BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
2547 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
2548 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
2549 BPF_MOV64_IMM(BPF_REG_4, 512),
2550 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
2551 BPF_FUNC_skb_load_bytes),
2552 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
2553 BPF_EXIT_INSN(),
2554 },
2555 .result = ACCEPT,
2556 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2557 },
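	/* "direct packet access:" tests: loads and stores through skb->data
	 * are only valid once the accessed range has been checked against
	 * skb->data_end; the tests cover both branch directions, variable
	 * offsets and alignment.
	 */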
2558 {
2559 "direct packet access: test1",
2560 .insns = {
2561 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2562 offsetof(struct __sk_buff, data)),
2563 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2564 offsetof(struct __sk_buff, data_end)),
2565 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2566 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2567 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2568 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2569 BPF_MOV64_IMM(BPF_REG_0, 0),
2570 BPF_EXIT_INSN(),
2571 },
2572 .result = ACCEPT,
2573 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2574 },
2575 {
2576 "direct packet access: test2",
2577 .insns = {
2578 BPF_MOV64_IMM(BPF_REG_0, 1),
2579 BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
2580 offsetof(struct __sk_buff, data_end)),
2581 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2582 offsetof(struct __sk_buff, data)),
2583 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
2585 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
2586 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
2587 BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
2588 BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
2589 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2590 offsetof(struct __sk_buff, data)),
2591 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2592 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
2593 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2594 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2595 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
2596 BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
2597 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
2598 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
2599 offsetof(struct __sk_buff, data_end)),
2600 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
2601 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
2602 BPF_MOV64_IMM(BPF_REG_0, 0),
2603 BPF_EXIT_INSN(),
2604 },
2605 .result = ACCEPT,
2606 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2607 },
2608 {
2609 "direct packet access: test3",
2610 .insns = {
2611 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2612 offsetof(struct __sk_buff, data)),
2613 BPF_MOV64_IMM(BPF_REG_0, 0),
2614 BPF_EXIT_INSN(),
2615 },
2616 .errstr = "invalid bpf_context access off=76",
2617 .result = REJECT,
2618 .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
2619 },
2620 {
2621 "direct packet access: test4 (write)",
2622 .insns = {
2623 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2624 offsetof(struct __sk_buff, data)),
2625 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2626 offsetof(struct __sk_buff, data_end)),
2627 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2628 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2629 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2630 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2631 BPF_MOV64_IMM(BPF_REG_0, 0),
2632 BPF_EXIT_INSN(),
2633 },
2634 .result = ACCEPT,
2635 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2636 },
2637 {
2638 "direct packet access: test5 (pkt_end >= reg, good access)",
2639 .insns = {
2640 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2641 offsetof(struct __sk_buff, data)),
2642 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2643 offsetof(struct __sk_buff, data_end)),
2644 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2646 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2647 BPF_MOV64_IMM(BPF_REG_0, 1),
2648 BPF_EXIT_INSN(),
2649 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2650 BPF_MOV64_IMM(BPF_REG_0, 0),
2651 BPF_EXIT_INSN(),
2652 },
2653 .result = ACCEPT,
2654 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2655 },
2656 {
2657 "direct packet access: test6 (pkt_end >= reg, bad access)",
2658 .insns = {
2659 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2660 offsetof(struct __sk_buff, data)),
2661 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2662 offsetof(struct __sk_buff, data_end)),
2663 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2665 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2666 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2667 BPF_MOV64_IMM(BPF_REG_0, 1),
2668 BPF_EXIT_INSN(),
2669 BPF_MOV64_IMM(BPF_REG_0, 0),
2670 BPF_EXIT_INSN(),
2671 },
2672 .errstr = "invalid access to packet",
2673 .result = REJECT,
2674 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2675 },
2676 {
2677 "direct packet access: test7 (pkt_end >= reg, both accesses)",
2678 .insns = {
2679 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2680 offsetof(struct __sk_buff, data)),
2681 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2682 offsetof(struct __sk_buff, data_end)),
2683 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2685 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3),
2686 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2687 BPF_MOV64_IMM(BPF_REG_0, 1),
2688 BPF_EXIT_INSN(),
2689 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2690 BPF_MOV64_IMM(BPF_REG_0, 0),
2691 BPF_EXIT_INSN(),
2692 },
2693 .errstr = "invalid access to packet",
2694 .result = REJECT,
2695 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2696 },
2697 {
2698 "direct packet access: test8 (double test, variant 1)",
2699 .insns = {
2700 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2701 offsetof(struct __sk_buff, data)),
2702 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2703 offsetof(struct __sk_buff, data_end)),
2704 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2705 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2706 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4),
2707 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2708 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2709 BPF_MOV64_IMM(BPF_REG_0, 1),
2710 BPF_EXIT_INSN(),
2711 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2712 BPF_MOV64_IMM(BPF_REG_0, 0),
2713 BPF_EXIT_INSN(),
2714 },
2715 .result = ACCEPT,
2716 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2717 },
2718 {
2719 "direct packet access: test9 (double test, variant 2)",
2720 .insns = {
2721 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2722 offsetof(struct __sk_buff, data)),
2723 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2724 offsetof(struct __sk_buff, data_end)),
2725 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2727 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
2728 BPF_MOV64_IMM(BPF_REG_0, 1),
2729 BPF_EXIT_INSN(),
2730 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2731 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2732 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
2733 BPF_MOV64_IMM(BPF_REG_0, 0),
2734 BPF_EXIT_INSN(),
2735 },
2736 .result = ACCEPT,
2737 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2738 },
2739 {
2740 "direct packet access: test10 (write invalid)",
2741 .insns = {
2742 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2743 offsetof(struct __sk_buff, data)),
2744 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2745 offsetof(struct __sk_buff, data_end)),
2746 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2747 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2748 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
2749 BPF_MOV64_IMM(BPF_REG_0, 0),
2750 BPF_EXIT_INSN(),
2751 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2752 BPF_MOV64_IMM(BPF_REG_0, 0),
2753 BPF_EXIT_INSN(),
2754 },
2755 .errstr = "invalid access to packet",
2756 .result = REJECT,
2757 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2758 },
2759 {
2760 "direct packet access: test11 (shift, good access)",
2761 .insns = {
2762 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2763 offsetof(struct __sk_buff, data)),
2764 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2765 offsetof(struct __sk_buff, data_end)),
2766 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2767 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2768 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2769 BPF_MOV64_IMM(BPF_REG_3, 144),
2770 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2772 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
2773 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2774 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2775 BPF_MOV64_IMM(BPF_REG_0, 1),
2776 BPF_EXIT_INSN(),
2777 BPF_MOV64_IMM(BPF_REG_0, 0),
2778 BPF_EXIT_INSN(),
2779 },
2780 .result = ACCEPT,
2781 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2782 },
2783 {
2784 "direct packet access: test12 (and, good access)",
2785 .insns = {
2786 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2787 offsetof(struct __sk_buff, data)),
2788 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2789 offsetof(struct __sk_buff, data_end)),
2790 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2792 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2793 BPF_MOV64_IMM(BPF_REG_3, 144),
2794 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2795 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2796 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2797 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2798 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2799 BPF_MOV64_IMM(BPF_REG_0, 1),
2800 BPF_EXIT_INSN(),
2801 BPF_MOV64_IMM(BPF_REG_0, 0),
2802 BPF_EXIT_INSN(),
2803 },
2804 .result = ACCEPT,
2805 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2806 },
2807 {
2808 "direct packet access: test13 (branches, good access)",
2809 .insns = {
2810 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2811 offsetof(struct __sk_buff, data)),
2812 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2813 offsetof(struct __sk_buff, data_end)),
2814 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2815 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2816 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
2817 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2818 offsetof(struct __sk_buff, mark)),
2819 BPF_MOV64_IMM(BPF_REG_4, 1),
2820 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
2821 BPF_MOV64_IMM(BPF_REG_3, 14),
2822 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2823 BPF_MOV64_IMM(BPF_REG_3, 24),
2824 BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
2825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
2826 BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
2827 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2828 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2829 BPF_MOV64_IMM(BPF_REG_0, 1),
2830 BPF_EXIT_INSN(),
2831 BPF_MOV64_IMM(BPF_REG_0, 0),
2832 BPF_EXIT_INSN(),
2833 },
2834 .result = ACCEPT,
2835 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2836 },
2837 {
2838 "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
2839 .insns = {
2840 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2841 offsetof(struct __sk_buff, data)),
2842 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2843 offsetof(struct __sk_buff, data_end)),
2844 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
2846 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
2847 BPF_MOV64_IMM(BPF_REG_5, 12),
2848 BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
2849 BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
2850 BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
2851 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
2852 BPF_MOV64_IMM(BPF_REG_0, 1),
2853 BPF_EXIT_INSN(),
2854 BPF_MOV64_IMM(BPF_REG_0, 0),
2855 BPF_EXIT_INSN(),
2856 },
2857 .result = ACCEPT,
2858 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2859 },
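	/* An xadd on the slot holding a spilled packet pointer turns the
	 * spilled value into an unknown scalar, so dereferencing the refilled
	 * R2 afterwards is rejected.
	 */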
2860 {
2861 "direct packet access: test15 (spill with xadd)",
2862 .insns = {
2863 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2864 offsetof(struct __sk_buff, data)),
2865 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2866 offsetof(struct __sk_buff, data_end)),
2867 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2869 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
2870 BPF_MOV64_IMM(BPF_REG_5, 4096),
2871 BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
2872 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
2873 BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
2874 BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
2875 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
2876 BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
2877 BPF_MOV64_IMM(BPF_REG_0, 0),
2878 BPF_EXIT_INSN(),
2879 },
2880 .errstr = "R2 invalid mem access 'inv'",
2881 .result = REJECT,
2882 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2883 },
2884 {
2885 "direct packet access: test16 (arith on data_end)",
2886 .insns = {
2887 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2888 offsetof(struct __sk_buff, data)),
2889 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2890 offsetof(struct __sk_buff, data_end)),
2891 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2892 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2893 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16),
2894 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2895 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2896 BPF_MOV64_IMM(BPF_REG_0, 0),
2897 BPF_EXIT_INSN(),
2898 },
2899 .errstr = "invalid access to packet",
2900 .result = REJECT,
2901 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2902 },
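	/* With F_LOAD_WITH_STRICT_ALIGNMENT the word-sized store through the
	 * packet pointer below is rejected as misaligned even though it is
	 * within the checked packet range.
	 */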
2903 {
2904 "direct packet access: test17 (pruning, alignment)",
2905 .insns = {
2906 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2907 offsetof(struct __sk_buff, data)),
2908 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2909 offsetof(struct __sk_buff, data_end)),
2910 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
2911 offsetof(struct __sk_buff, mark)),
2912 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2913 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14),
2914 BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4),
2915 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2916 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4),
2917 BPF_MOV64_IMM(BPF_REG_0, 0),
2918 BPF_EXIT_INSN(),
2919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
2920 BPF_JMP_A(-6),
2921 },
2922 .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4",
2923 .result = REJECT,
2924 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2925 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
2926 },
2927 {
2928 "direct packet access: test18 (imm += pkt_ptr, 1)",
2929 .insns = {
2930 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2931 offsetof(struct __sk_buff, data)),
2932 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2933 offsetof(struct __sk_buff, data_end)),
2934 BPF_MOV64_IMM(BPF_REG_0, 8),
2935 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
2936 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
2937 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
2938 BPF_MOV64_IMM(BPF_REG_0, 0),
2939 BPF_EXIT_INSN(),
2940 },
2941 .result = ACCEPT,
2942 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2943 },
2944 {
2945 "direct packet access: test19 (imm += pkt_ptr, 2)",
2946 .insns = {
2947 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2948 offsetof(struct __sk_buff, data)),
2949 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2950 offsetof(struct __sk_buff, data_end)),
2951 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2952 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2953 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3),
2954 BPF_MOV64_IMM(BPF_REG_4, 4),
2955 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2956 BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0),
2957 BPF_MOV64_IMM(BPF_REG_0, 0),
2958 BPF_EXIT_INSN(),
2959 },
2960 .result = ACCEPT,
2961 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2962 },
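	/* tests 20-24: adding an unknown scalar to a packet pointer is only
	 * usable when the scalar has been bounded first (e.g. via AND or a
	 * shift), the combined offset stays within the verifier's packet
	 * limits, and the result is re-checked against data_end.
	 */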
2963 {
2964 "direct packet access: test20 (x += pkt_ptr, 1)",
2965 .insns = {
2966 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2967 offsetof(struct __sk_buff, data)),
2968 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2969 offsetof(struct __sk_buff, data_end)),
2970 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
2971 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
2972 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
2973 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff),
2974 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
2975 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
2976 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
2977 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
2978 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
2979 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
2980 BPF_MOV64_IMM(BPF_REG_0, 0),
2981 BPF_EXIT_INSN(),
2982 },
2983 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2984 .result = ACCEPT,
2985 },
2986 {
2987 "direct packet access: test21 (x += pkt_ptr, 2)",
2988 .insns = {
2989 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2990 offsetof(struct __sk_buff, data)),
2991 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2992 offsetof(struct __sk_buff, data_end)),
2993 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
2994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
2995 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9),
2996 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
2997 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
2998 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
2999 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff),
3000 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3001 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3002 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1),
3003 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1),
3004 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0),
3005 BPF_MOV64_IMM(BPF_REG_0, 0),
3006 BPF_EXIT_INSN(),
3007 },
3008 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3009 .result = ACCEPT,
3010 },
3011 {
3012 "direct packet access: test22 (x += pkt_ptr, 3)",
3013 .insns = {
3014 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3015 offsetof(struct __sk_buff, data)),
3016 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3017 offsetof(struct __sk_buff, data_end)),
3018 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3019 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3020 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8),
3021 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16),
3022 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16),
3023 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11),
3024 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
3025 BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
3026 BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8),
3027 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
3028 BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49),
3029 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2),
3030 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
3031 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
3032 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2),
3033 BPF_MOV64_IMM(BPF_REG_2, 1),
3034 BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0),
3035 BPF_MOV64_IMM(BPF_REG_0, 0),
3036 BPF_EXIT_INSN(),
3037 },
3038 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3039 .result = ACCEPT,
3040 },
3041 {
3042 "direct packet access: test23 (x += pkt_ptr, 4)",
3043 .insns = {
3044 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3045 offsetof(struct __sk_buff, data)),
3046 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3047 offsetof(struct __sk_buff, data_end)),
3048 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3049 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3050 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3051 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
3052 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3053 BPF_MOV64_IMM(BPF_REG_0, 31),
3054 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3055 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3056 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3057 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1),
3058 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3059 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3060 BPF_MOV64_IMM(BPF_REG_0, 0),
3061 BPF_EXIT_INSN(),
3062 },
3063 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3064 .result = REJECT,
3065 .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
3066 },
3067 {
3068 "direct packet access: test24 (x += pkt_ptr, 5)",
3069 .insns = {
3070 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3071 offsetof(struct __sk_buff, data)),
3072 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3073 offsetof(struct __sk_buff, data_end)),
3074 BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
3075 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
3076 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
3077 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
3078 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3079 BPF_MOV64_IMM(BPF_REG_0, 64),
3080 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
3081 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
3082 BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
3083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1),
3084 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
3085 BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0),
3086 BPF_MOV64_IMM(BPF_REG_0, 0),
3087 BPF_EXIT_INSN(),
3088 },
3089 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3090 .result = ACCEPT,
3091 },
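	/* tests 25-28: packet bounds must also be learned from the JLT and
	 * JLE jump variants, in both the taken and fall-through directions.
	 */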
3092 {
3093 "direct packet access: test25 (marking on <, good access)",
3094 .insns = {
3095 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3096 offsetof(struct __sk_buff, data)),
3097 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3098 offsetof(struct __sk_buff, data_end)),
3099 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3101 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2),
3102 BPF_MOV64_IMM(BPF_REG_0, 0),
3103 BPF_EXIT_INSN(),
3104 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3105 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3106 },
3107 .result = ACCEPT,
3108 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3109 },
3110 {
3111 "direct packet access: test26 (marking on <, bad access)",
3112 .insns = {
3113 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3114 offsetof(struct __sk_buff, data)),
3115 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3116 offsetof(struct __sk_buff, data_end)),
3117 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3118 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3119 BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3),
3120 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3121 BPF_MOV64_IMM(BPF_REG_0, 0),
3122 BPF_EXIT_INSN(),
3123 BPF_JMP_IMM(BPF_JA, 0, 0, -3),
3124 },
3125 .result = REJECT,
3126 .errstr = "invalid access to packet",
3127 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3128 },
3129 {
3130 "direct packet access: test27 (marking on <=, good access)",
3131 .insns = {
3132 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3133 offsetof(struct __sk_buff, data)),
3134 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3135 offsetof(struct __sk_buff, data_end)),
3136 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3137 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3138 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1),
3139 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3140 BPF_MOV64_IMM(BPF_REG_0, 1),
3141 BPF_EXIT_INSN(),
3142 },
3143 .result = ACCEPT,
3144 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3145 },
3146 {
3147 "direct packet access: test28 (marking on <=, bad access)",
3148 .insns = {
3149 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3150 offsetof(struct __sk_buff, data)),
3151 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3152 offsetof(struct __sk_buff, data_end)),
3153 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
3154 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
3155 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2),
3156 BPF_MOV64_IMM(BPF_REG_0, 1),
3157 BPF_EXIT_INSN(),
3158 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
3159 BPF_JMP_IMM(BPF_JA, 0, 0, -4),
3160 },
3161 .result = REJECT,
3162 .errstr = "invalid access to packet",
3163 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3164 },
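	/* "helper access to packet:" tests: packet pointers may only be
	 * passed as a helper's memory argument for helpers that allow it,
	 * and only after a data_end check covering the full length the
	 * helper will access.
	 */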
3165 {
3166 "helper access to packet: test1, valid packet_ptr range",
3167 .insns = {
3168 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3169 offsetof(struct xdp_md, data)),
3170 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3171 offsetof(struct xdp_md, data_end)),
3172 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3174 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3175 BPF_LD_MAP_FD(BPF_REG_1, 0),
3176 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3177 BPF_MOV64_IMM(BPF_REG_4, 0),
3178 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3179 BPF_FUNC_map_update_elem),
3180 BPF_MOV64_IMM(BPF_REG_0, 0),
3181 BPF_EXIT_INSN(),
3182 },
3183 .fixup_map1 = { 5 },
3184 .result_unpriv = ACCEPT,
3185 .result = ACCEPT,
3186 .prog_type = BPF_PROG_TYPE_XDP,
3187 },
3188 {
3189 "helper access to packet: test2, unchecked packet_ptr",
3190 .insns = {
3191 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3192 offsetof(struct xdp_md, data)),
3193 BPF_LD_MAP_FD(BPF_REG_1, 0),
3194 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3195 BPF_FUNC_map_lookup_elem),
3196 BPF_MOV64_IMM(BPF_REG_0, 0),
3197 BPF_EXIT_INSN(),
3198 },
3199 .fixup_map1 = { 1 },
3200 .result = REJECT,
3201 .errstr = "invalid access to packet",
3202 .prog_type = BPF_PROG_TYPE_XDP,
3203 },
3204 {
3205 "helper access to packet: test3, variable add",
3206 .insns = {
3207 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3208 offsetof(struct xdp_md, data)),
3209 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3210 offsetof(struct xdp_md, data_end)),
3211 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3212 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3213 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3214 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3215 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3216 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3217 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3219 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3220 BPF_LD_MAP_FD(BPF_REG_1, 0),
3221 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3222 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3223 BPF_FUNC_map_lookup_elem),
3224 BPF_MOV64_IMM(BPF_REG_0, 0),
3225 BPF_EXIT_INSN(),
3226 },
3227 .fixup_map1 = { 11 },
3228 .result = ACCEPT,
3229 .prog_type = BPF_PROG_TYPE_XDP,
3230 },
3231 {
3232 "helper access to packet: test4, packet_ptr with bad range",
3233 .insns = {
3234 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3235 offsetof(struct xdp_md, data)),
3236 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3237 offsetof(struct xdp_md, data_end)),
3238 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3239 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3240 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3241 BPF_MOV64_IMM(BPF_REG_0, 0),
3242 BPF_EXIT_INSN(),
3243 BPF_LD_MAP_FD(BPF_REG_1, 0),
3244 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3245 BPF_FUNC_map_lookup_elem),
3246 BPF_MOV64_IMM(BPF_REG_0, 0),
3247 BPF_EXIT_INSN(),
3248 },
3249 .fixup_map1 = { 7 },
3250 .result = REJECT,
3251 .errstr = "invalid access to packet",
3252 .prog_type = BPF_PROG_TYPE_XDP,
3253 },
3254 {
3255 "helper access to packet: test5, packet_ptr with too short range",
3256 .insns = {
3257 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3258 offsetof(struct xdp_md, data)),
3259 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3260 offsetof(struct xdp_md, data_end)),
3261 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3262 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3263 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3264 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3265 BPF_LD_MAP_FD(BPF_REG_1, 0),
3266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3267 BPF_FUNC_map_lookup_elem),
3268 BPF_MOV64_IMM(BPF_REG_0, 0),
3269 BPF_EXIT_INSN(),
3270 },
3271 .fixup_map1 = { 6 },
3272 .result = REJECT,
3273 .errstr = "invalid access to packet",
3274 .prog_type = BPF_PROG_TYPE_XDP,
3275 },
3276 {
3277 "helper access to packet: test6, cls valid packet_ptr range",
3278 .insns = {
3279 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3280 offsetof(struct __sk_buff, data)),
3281 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3282 offsetof(struct __sk_buff, data_end)),
3283 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
3284 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
3285 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
3286 BPF_LD_MAP_FD(BPF_REG_1, 0),
3287 BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
3288 BPF_MOV64_IMM(BPF_REG_4, 0),
3289 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3290 BPF_FUNC_map_update_elem),
3291 BPF_MOV64_IMM(BPF_REG_0, 0),
3292 BPF_EXIT_INSN(),
3293 },
3294 .fixup_map1 = { 5 },
3295 .result = ACCEPT,
3296 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3297 },
3298 {
3299 "helper access to packet: test7, cls unchecked packet_ptr",
3300 .insns = {
3301 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3302 offsetof(struct __sk_buff, data)),
3303 BPF_LD_MAP_FD(BPF_REG_1, 0),
3304 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3305 BPF_FUNC_map_lookup_elem),
3306 BPF_MOV64_IMM(BPF_REG_0, 0),
3307 BPF_EXIT_INSN(),
3308 },
3309 .fixup_map1 = { 1 },
3310 .result = REJECT,
3311 .errstr = "invalid access to packet",
3312 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3313 },
3314 {
3315 "helper access to packet: test8, cls variable add",
3316 .insns = {
3317 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3318 offsetof(struct __sk_buff, data)),
3319 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3320 offsetof(struct __sk_buff, data_end)),
3321 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3322 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
3323 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10),
3324 BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0),
3325 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3326 BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5),
3327 BPF_MOV64_REG(BPF_REG_5, BPF_REG_4),
3328 BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8),
3329 BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4),
3330 BPF_LD_MAP_FD(BPF_REG_1, 0),
3331 BPF_MOV64_REG(BPF_REG_2, BPF_REG_4),
3332 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3333 BPF_FUNC_map_lookup_elem),
3334 BPF_MOV64_IMM(BPF_REG_0, 0),
3335 BPF_EXIT_INSN(),
3336 },
3337 .fixup_map1 = { 11 },
3338 .result = ACCEPT,
3339 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3340 },
3341 {
3342 "helper access to packet: test9, cls packet_ptr with bad range",
3343 .insns = {
3344 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3345 offsetof(struct __sk_buff, data)),
3346 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3347 offsetof(struct __sk_buff, data_end)),
3348 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3349 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
3350 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2),
3351 BPF_MOV64_IMM(BPF_REG_0, 0),
3352 BPF_EXIT_INSN(),
3353 BPF_LD_MAP_FD(BPF_REG_1, 0),
3354 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3355 BPF_FUNC_map_lookup_elem),
3356 BPF_MOV64_IMM(BPF_REG_0, 0),
3357 BPF_EXIT_INSN(),
3358 },
3359 .fixup_map1 = { 7 },
3360 .result = REJECT,
3361 .errstr = "invalid access to packet",
3362 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3363 },
3364 {
3365 "helper access to packet: test10, cls packet_ptr with too short range",
3366 .insns = {
3367 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
3368 offsetof(struct __sk_buff, data)),
3369 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
3370 offsetof(struct __sk_buff, data_end)),
3371 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
3372 BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
3373 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7),
3374 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3),
3375 BPF_LD_MAP_FD(BPF_REG_1, 0),
3376 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3377 BPF_FUNC_map_lookup_elem),
3378 BPF_MOV64_IMM(BPF_REG_0, 0),
3379 BPF_EXIT_INSN(),
3380 },
3381 .fixup_map1 = { 6 },
3382 .result = REJECT,
3383 .errstr = "invalid access to packet",
3384 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3385 },
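	/* Neither bpf_skb_store_bytes() nor bpf_skb_load_bytes() accepts a
	 * direct packet pointer as its buffer argument, so both calls are
	 * rejected even after a bounds check.
	 */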
3386 {
3387 "helper access to packet: test11, cls unsuitable helper 1",
3388 .insns = {
3389 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3390 offsetof(struct __sk_buff, data)),
3391 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3392 offsetof(struct __sk_buff, data_end)),
3393 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3394 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7),
3396 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4),
3397 BPF_MOV64_IMM(BPF_REG_2, 0),
3398 BPF_MOV64_IMM(BPF_REG_4, 42),
3399 BPF_MOV64_IMM(BPF_REG_5, 0),
3400 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3401 BPF_FUNC_skb_store_bytes),
3402 BPF_MOV64_IMM(BPF_REG_0, 0),
3403 BPF_EXIT_INSN(),
3404 },
3405 .result = REJECT,
3406 .errstr = "helper access to the packet",
3407 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3408 },
3409 {
3410 "helper access to packet: test12, cls unsuitable helper 2",
3411 .insns = {
3412 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3413 offsetof(struct __sk_buff, data)),
3414 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3415 offsetof(struct __sk_buff, data_end)),
3416 BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
3417 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8),
3418 BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3),
3419 BPF_MOV64_IMM(BPF_REG_2, 0),
3420 BPF_MOV64_IMM(BPF_REG_4, 4),
3421 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3422 BPF_FUNC_skb_load_bytes),
3423 BPF_MOV64_IMM(BPF_REG_0, 0),
3424 BPF_EXIT_INSN(),
3425 },
3426 .result = REJECT,
3427 .errstr = "helper access to the packet",
3428 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3429 },
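	/* bpf_csum_diff() does accept packet pointers: the calls below are
	 * valid as long as the checked packet range covers the length in R2;
	 * too-large, negative or zero lengths are rejected.
	 */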
3430 {
3431 "helper access to packet: test13, cls helper ok",
3432 .insns = {
3433 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3434 offsetof(struct __sk_buff, data)),
3435 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3436 offsetof(struct __sk_buff, data_end)),
3437 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3440 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3441 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3442 BPF_MOV64_IMM(BPF_REG_2, 4),
3443 BPF_MOV64_IMM(BPF_REG_3, 0),
3444 BPF_MOV64_IMM(BPF_REG_4, 0),
3445 BPF_MOV64_IMM(BPF_REG_5, 0),
3446 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3447 BPF_FUNC_csum_diff),
3448 BPF_MOV64_IMM(BPF_REG_0, 0),
3449 BPF_EXIT_INSN(),
3450 },
3451 .result = ACCEPT,
3452 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3453 },
3454 {
3455 "helper access to packet: test14, cls helper ok sub",
3456 .insns = {
3457 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3458 offsetof(struct __sk_buff, data)),
3459 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3460 offsetof(struct __sk_buff, data_end)),
3461 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3462 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3464 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3465 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4),
3466 BPF_MOV64_IMM(BPF_REG_2, 4),
3467 BPF_MOV64_IMM(BPF_REG_3, 0),
3468 BPF_MOV64_IMM(BPF_REG_4, 0),
3469 BPF_MOV64_IMM(BPF_REG_5, 0),
3470 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3471 BPF_FUNC_csum_diff),
3472 BPF_MOV64_IMM(BPF_REG_0, 0),
3473 BPF_EXIT_INSN(),
3474 },
3475 .result = ACCEPT,
3476 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3477 },
3478 {
3479 "helper access to packet: test15, cls helper fail sub",
3480 .insns = {
3481 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3482 offsetof(struct __sk_buff, data)),
3483 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3484 offsetof(struct __sk_buff, data_end)),
3485 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3486 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3487 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3488 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3489 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12),
3490 BPF_MOV64_IMM(BPF_REG_2, 4),
3491 BPF_MOV64_IMM(BPF_REG_3, 0),
3492 BPF_MOV64_IMM(BPF_REG_4, 0),
3493 BPF_MOV64_IMM(BPF_REG_5, 0),
3494 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3495 BPF_FUNC_csum_diff),
3496 BPF_MOV64_IMM(BPF_REG_0, 0),
3497 BPF_EXIT_INSN(),
3498 },
3499 .result = REJECT,
3500 .errstr = "invalid access to packet",
3501 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3502 },
3503 {
3504 "helper access to packet: test16, cls helper fail range 1",
3505 .insns = {
3506 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3507 offsetof(struct __sk_buff, data)),
3508 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3509 offsetof(struct __sk_buff, data_end)),
3510 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3513 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3514 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3515 BPF_MOV64_IMM(BPF_REG_2, 8),
3516 BPF_MOV64_IMM(BPF_REG_3, 0),
3517 BPF_MOV64_IMM(BPF_REG_4, 0),
3518 BPF_MOV64_IMM(BPF_REG_5, 0),
3519 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3520 BPF_FUNC_csum_diff),
3521 BPF_MOV64_IMM(BPF_REG_0, 0),
3522 BPF_EXIT_INSN(),
3523 },
3524 .result = REJECT,
3525 .errstr = "invalid access to packet",
3526 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3527 },
3528 {
3529 "helper access to packet: test17, cls helper fail range 2",
3530 .insns = {
3531 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3532 offsetof(struct __sk_buff, data)),
3533 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3534 offsetof(struct __sk_buff, data_end)),
3535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3537 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3538 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3539 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3540 BPF_MOV64_IMM(BPF_REG_2, -9),
3541 BPF_MOV64_IMM(BPF_REG_3, 0),
3542 BPF_MOV64_IMM(BPF_REG_4, 0),
3543 BPF_MOV64_IMM(BPF_REG_5, 0),
3544 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3545 BPF_FUNC_csum_diff),
3546 BPF_MOV64_IMM(BPF_REG_0, 0),
3547 BPF_EXIT_INSN(),
3548 },
3549 .result = REJECT,
3550 .errstr = "R2 min value is negative",
3551 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3552 },
3553 {
3554 "helper access to packet: test18, cls helper fail range 3",
3555 .insns = {
3556 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3557 offsetof(struct __sk_buff, data)),
3558 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3559 offsetof(struct __sk_buff, data_end)),
3560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3561 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3562 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3563 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3564 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3565 BPF_MOV64_IMM(BPF_REG_2, ~0),
3566 BPF_MOV64_IMM(BPF_REG_3, 0),
3567 BPF_MOV64_IMM(BPF_REG_4, 0),
3568 BPF_MOV64_IMM(BPF_REG_5, 0),
3569 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3570 BPF_FUNC_csum_diff),
3571 BPF_MOV64_IMM(BPF_REG_0, 0),
3572 BPF_EXIT_INSN(),
3573 },
3574 .result = REJECT,
3575 .errstr = "R2 min value is negative",
3576 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3577 },
3578 {
3579 "helper access to packet: test19, cls helper fail range zero",
3580 .insns = {
3581 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3582 offsetof(struct __sk_buff, data)),
3583 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3584 offsetof(struct __sk_buff, data_end)),
3585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3586 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3587 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3588 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3590 BPF_MOV64_IMM(BPF_REG_2, 0),
3591 BPF_MOV64_IMM(BPF_REG_3, 0),
3592 BPF_MOV64_IMM(BPF_REG_4, 0),
3593 BPF_MOV64_IMM(BPF_REG_5, 0),
3594 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3595 BPF_FUNC_csum_diff),
3596 BPF_MOV64_IMM(BPF_REG_0, 0),
3597 BPF_EXIT_INSN(),
3598 },
3599 .result = REJECT,
3600 .errstr = "invalid access to packet",
3601 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3602 },
3603 {
3604 "helper access to packet: test20, pkt end as input",
3605 .insns = {
3606 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3607 offsetof(struct __sk_buff, data)),
3608 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3609 offsetof(struct __sk_buff, data_end)),
3610 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3611 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3612 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3613 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3614 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
3615 BPF_MOV64_IMM(BPF_REG_2, 4),
3616 BPF_MOV64_IMM(BPF_REG_3, 0),
3617 BPF_MOV64_IMM(BPF_REG_4, 0),
3618 BPF_MOV64_IMM(BPF_REG_5, 0),
3619 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3620 BPF_FUNC_csum_diff),
3621 BPF_MOV64_IMM(BPF_REG_0, 0),
3622 BPF_EXIT_INSN(),
3623 },
3624 .result = REJECT,
3625 .errstr = "R1 type=pkt_end expected=fp",
3626 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3627 },
3628 {
3629 "helper access to packet: test21, wrong reg",
3630 .insns = {
3631 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
3632 offsetof(struct __sk_buff, data)),
3633 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
3634 offsetof(struct __sk_buff, data_end)),
3635 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
3636 BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
3637 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7),
3638 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6),
3639 BPF_MOV64_IMM(BPF_REG_2, 4),
3640 BPF_MOV64_IMM(BPF_REG_3, 0),
3641 BPF_MOV64_IMM(BPF_REG_4, 0),
3642 BPF_MOV64_IMM(BPF_REG_5, 0),
3643 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3644 BPF_FUNC_csum_diff),
3645 BPF_MOV64_IMM(BPF_REG_0, 0),
3646 BPF_EXIT_INSN(),
3647 },
3648 .result = REJECT,
3649 .errstr = "invalid access to packet",
3650 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
3651 },
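/* Array map value accesses: the tests below index into struct test_val
 * (value_size 48, as the error strings show) with constants, shifted
 * registers and values loaded from the map itself, and verify that only
 * properly bounds-checked offsets are accepted.
 */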
3652 {
3653 "valid map access into an array with a constant",
3654 .insns = {
3655 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3656 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3657 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3658 BPF_LD_MAP_FD(BPF_REG_1, 0),
3659 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3660 BPF_FUNC_map_lookup_elem),
3661 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3662 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3663 offsetof(struct test_val, foo)),
3664 BPF_EXIT_INSN(),
3665 },
3666 .fixup_map2 = { 3 },
3667 .errstr_unpriv = "R0 leaks addr",
3668 .result_unpriv = REJECT,
3669 .result = ACCEPT,
3670 },
3671 {
3672 "valid map access into an array with a register",
3673 .insns = {
3674 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3675 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3677 BPF_LD_MAP_FD(BPF_REG_1, 0),
3678 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3679 BPF_FUNC_map_lookup_elem),
3680 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3681 BPF_MOV64_IMM(BPF_REG_1, 4),
3682 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3683 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3684 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3685 offsetof(struct test_val, foo)),
3686 BPF_EXIT_INSN(),
3687 },
3688 .fixup_map2 = { 3 },
3689 .errstr_unpriv = "R0 leaks addr",
3690 .result_unpriv = REJECT,
3691 .result = ACCEPT,
3692 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3693 },
3694 {
3695 "valid map access into an array with a variable",
3696 .insns = {
3697 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3698 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3699 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3700 BPF_LD_MAP_FD(BPF_REG_1, 0),
3701 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3702 BPF_FUNC_map_lookup_elem),
3703 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
3704 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3705 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
3706 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3707 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3708 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3709 offsetof(struct test_val, foo)),
3710 BPF_EXIT_INSN(),
3711 },
3712 .fixup_map2 = { 3 },
3713 .errstr_unpriv = "R0 leaks addr",
3714 .result_unpriv = REJECT,
3715 .result = ACCEPT,
3716 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3717 },
3718 {
3719 "valid map access into an array with a signed variable",
3720 .insns = {
3721 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3722 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3723 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3724 BPF_LD_MAP_FD(BPF_REG_1, 0),
3725 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3726 BPF_FUNC_map_lookup_elem),
3727 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
3728 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3729 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
3730 BPF_MOV32_IMM(BPF_REG_1, 0),
3731 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3732 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3733 BPF_MOV32_IMM(BPF_REG_1, 0),
3734 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3735 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3736 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3737 offsetof(struct test_val, foo)),
3738 BPF_EXIT_INSN(),
3739 },
3740 .fixup_map2 = { 3 },
3741 .errstr_unpriv = "R0 leaks addr",
3742 .result_unpriv = REJECT,
3743 .result = ACCEPT,
3744 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3745 },
3746 {
3747 "invalid map access into an array with a constant",
3748 .insns = {
3749 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3750 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3751 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3752 BPF_LD_MAP_FD(BPF_REG_1, 0),
3753 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3754 BPF_FUNC_map_lookup_elem),
3755 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3756 BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
3757 offsetof(struct test_val, foo)),
3758 BPF_EXIT_INSN(),
3759 },
3760 .fixup_map2 = { 3 },
3761 .errstr = "invalid access to map value, value_size=48 off=48 size=8",
3762 .result = REJECT,
3763 },
3764 {
3765 "invalid map access into an array with a register",
3766 .insns = {
3767 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3768 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3770 BPF_LD_MAP_FD(BPF_REG_1, 0),
3771 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3772 BPF_FUNC_map_lookup_elem),
3773 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3774 BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
3775 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3776 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3777 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3778 offsetof(struct test_val, foo)),
3779 BPF_EXIT_INSN(),
3780 },
3781 .fixup_map2 = { 3 },
3782 .errstr = "R0 min value is outside of the array range",
3783 .result = REJECT,
3784 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3785 },
3786 {
3787 "invalid map access into an array with a variable",
3788 .insns = {
3789 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3790 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3791 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3792 BPF_LD_MAP_FD(BPF_REG_1, 0),
3793 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3794 BPF_FUNC_map_lookup_elem),
3795 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
3796 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3797 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
3798 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3799 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3800 offsetof(struct test_val, foo)),
3801 BPF_EXIT_INSN(),
3802 },
3803 .fixup_map2 = { 3 },
3804 .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
3805 .result = REJECT,
3806 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3807 },
3808 {
3809 "invalid map access into an array with no floor check",
3810 .insns = {
3811 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3812 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3814 BPF_LD_MAP_FD(BPF_REG_1, 0),
3815 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3816 BPF_FUNC_map_lookup_elem),
3817 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3818 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
3819 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
3820 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
3821 BPF_MOV32_IMM(BPF_REG_1, 0),
3822 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3823 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3824 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3825 offsetof(struct test_val, foo)),
3826 BPF_EXIT_INSN(),
3827 },
3828 .fixup_map2 = { 3 },
3829 .errstr_unpriv = "R0 leaks addr",
3830 .errstr = "R0 unbounded memory access",
3831 .result_unpriv = REJECT,
3832 .result = REJECT,
3833 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3834 },
3835 {
3836 "invalid map access into an array with a invalid max check",
3837 .insns = {
3838 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3839 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3840 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3841 BPF_LD_MAP_FD(BPF_REG_1, 0),
3842 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3843 BPF_FUNC_map_lookup_elem),
3844 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
3845 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
3846 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
3847 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
3848 BPF_MOV32_IMM(BPF_REG_1, 0),
3849 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
3850 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
3851 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
3852 offsetof(struct test_val, foo)),
3853 BPF_EXIT_INSN(),
3854 },
3855 .fixup_map2 = { 3 },
3856 .errstr_unpriv = "R0 leaks addr",
3857 .errstr = "invalid access to map value, value_size=48 off=44 size=8",
3858 .result_unpriv = REJECT,
3859 .result = REJECT,
3860 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3861 },
3862 {
3863 "invalid map access into an array with a invalid max check",
3864 .insns = {
3865 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3866 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3867 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3868 BPF_LD_MAP_FD(BPF_REG_1, 0),
3869 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3870 BPF_FUNC_map_lookup_elem),
3871 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
3872 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
3873 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
3874 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3875 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3876 BPF_LD_MAP_FD(BPF_REG_1, 0),
3877 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3878 BPF_FUNC_map_lookup_elem),
3879 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
3880 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
3881 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
3882 offsetof(struct test_val, foo)),
3883 BPF_EXIT_INSN(),
3884 },
3885 .fixup_map2 = { 3, 11 },
3886 .errstr_unpriv = "R0 pointer += pointer",
3887 .errstr = "R0 invalid mem access 'inv'",
3888 .result_unpriv = REJECT,
3889 .result = REJECT,
3890 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3891 },
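/* PTR_TO_MAP_VALUE_OR_NULL handling: a NULL check done on one register
 * must carry over to plain copies of the lookup result, any arithmetic
 * on such a copy before the check destroys that link, and a later
 * helper call clobbers copies kept in the caller-saved R1-R5.
 */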
3892 {
3893 "multiple registers share map_lookup_elem result",
3894 .insns = {
3895 BPF_MOV64_IMM(BPF_REG_1, 10),
3896 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3897 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3899 BPF_LD_MAP_FD(BPF_REG_1, 0),
3900 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3901 BPF_FUNC_map_lookup_elem),
3902 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3903 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3904 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3905 BPF_EXIT_INSN(),
3906 },
3907 .fixup_map1 = { 4 },
3908 .result = ACCEPT,
3909 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3910 },
3911 {
3912 "alu ops on ptr_to_map_value_or_null, 1",
3913 .insns = {
3914 BPF_MOV64_IMM(BPF_REG_1, 10),
3915 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3916 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3918 BPF_LD_MAP_FD(BPF_REG_1, 0),
3919 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3920 BPF_FUNC_map_lookup_elem),
3921 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3922 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2),
3923 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
3924 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3925 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3926 BPF_EXIT_INSN(),
3927 },
3928 .fixup_map1 = { 4 },
3929 .errstr = "R4 invalid mem access",
3930 .result = REJECT,
3931 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3932 },
3933 {
3934 "alu ops on ptr_to_map_value_or_null, 2",
3935 .insns = {
3936 BPF_MOV64_IMM(BPF_REG_1, 10),
3937 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3938 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3940 BPF_LD_MAP_FD(BPF_REG_1, 0),
3941 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3942 BPF_FUNC_map_lookup_elem),
3943 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3944 BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1),
3945 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3946 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3947 BPF_EXIT_INSN(),
3948 },
3949 .fixup_map1 = { 4 },
3950 .errstr = "R4 invalid mem access",
3951 .result = REJECT,
3952 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3953 },
3954 {
3955 "alu ops on ptr_to_map_value_or_null, 3",
3956 .insns = {
3957 BPF_MOV64_IMM(BPF_REG_1, 10),
3958 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3961 BPF_LD_MAP_FD(BPF_REG_1, 0),
3962 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3963 BPF_FUNC_map_lookup_elem),
3964 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3965 BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1),
3966 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3967 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3968 BPF_EXIT_INSN(),
3969 },
3970 .fixup_map1 = { 4 },
3971 .errstr = "R4 invalid mem access",
3972 .result = REJECT,
3973 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3974 },
3975 {
3976 "invalid memory access with multiple map_lookup_elem calls",
3977 .insns = {
3978 BPF_MOV64_IMM(BPF_REG_1, 10),
3979 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
3980 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
3981 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
3982 BPF_LD_MAP_FD(BPF_REG_1, 0),
3983 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
3984 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
3985 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3986 BPF_FUNC_map_lookup_elem),
3987 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
3988 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
3989 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
3990 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
3991 BPF_FUNC_map_lookup_elem),
3992 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
3993 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
3994 BPF_EXIT_INSN(),
3995 },
3996 .fixup_map1 = { 4 },
3997 .result = REJECT,
3998 .errstr = "R4 !read_ok",
3999 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4000 },
4001 {
4002 "valid indirect map_lookup_elem access with 2nd lookup in branch",
4003 .insns = {
4004 BPF_MOV64_IMM(BPF_REG_1, 10),
4005 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
4006 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4007 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4008 BPF_LD_MAP_FD(BPF_REG_1, 0),
4009 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
4010 BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
4011 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4012 BPF_FUNC_map_lookup_elem),
4013 BPF_MOV64_IMM(BPF_REG_2, 10),
4014 BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
4015 BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
4016 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
4017 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4018 BPF_FUNC_map_lookup_elem),
4019 BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
4020 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
4021 BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
4022 BPF_EXIT_INSN(),
4023 },
4024 .fixup_map1 = { 4 },
4025 .result = ACCEPT,
4026 .prog_type = BPF_PROG_TYPE_SCHED_CLS
4027 },
4028 {
4029 "invalid map access from else condition",
4030 .insns = {
4031 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4032 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4033 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4034 BPF_LD_MAP_FD(BPF_REG_1, 0),
4035 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
4036 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4037 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
4038 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
4039 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
4040 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
4041 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
4042 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
4043 BPF_EXIT_INSN(),
4044 },
4045 .fixup_map2 = { 3 },
4046 .errstr = "R0 unbounded memory access",
4047 .result = REJECT,
4048 .errstr_unpriv = "R0 leaks addr",
4049 .result_unpriv = REJECT,
4050 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
4051 },
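/* OR-ing known constants must still yield a known constant so that the
 * result can serve as the size of the stack buffer handed to
 * bpf_probe_read(): 34 | 13 == 47 fits into the 48 bytes below fp,
 * while 34 | 24 == 58 does not and has to be rejected.
 */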
4052 {
4053 "constant register |= constant should keep constant type",
4054 .insns = {
4055 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4056 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4057 BPF_MOV64_IMM(BPF_REG_2, 34),
4058 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
4059 BPF_MOV64_IMM(BPF_REG_3, 0),
4060 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4061 BPF_EXIT_INSN(),
4062 },
4063 .result = ACCEPT,
4064 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4065 },
4066 {
4067 "constant register |= constant should not bypass stack boundary checks",
4068 .insns = {
4069 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4070 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4071 BPF_MOV64_IMM(BPF_REG_2, 34),
4072 BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
4073 BPF_MOV64_IMM(BPF_REG_3, 0),
4074 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4075 BPF_EXIT_INSN(),
4076 },
4077 .errstr = "invalid stack type R1 off=-48 access_size=58",
4078 .result = REJECT,
4079 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4080 },
4081 {
4082 "constant register |= constant register should keep constant type",
4083 .insns = {
4084 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4085 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4086 BPF_MOV64_IMM(BPF_REG_2, 34),
4087 BPF_MOV64_IMM(BPF_REG_4, 13),
4088 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4089 BPF_MOV64_IMM(BPF_REG_3, 0),
4090 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4091 BPF_EXIT_INSN(),
4092 },
4093 .result = ACCEPT,
4094 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4095 },
4096 {
4097 "constant register |= constant register should not bypass stack boundary checks",
4098 .insns = {
4099 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
4100 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
4101 BPF_MOV64_IMM(BPF_REG_2, 34),
4102 BPF_MOV64_IMM(BPF_REG_4, 24),
4103 BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
4104 BPF_MOV64_IMM(BPF_REG_3, 0),
4105 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4106 BPF_EXIT_INSN(),
4107 },
4108 .errstr = "invalid stack type R1 off=-48 access_size=58",
4109 .result = REJECT,
4110 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4111 },
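/* Direct packet access per program type: LWT_IN and LWT_OUT may only
 * read packet data, LWT_XMIT may also write, and in every case the
 * usual data/data_end check is required first. Overlapping (redundant)
 * range checks must not confuse the tracked packet range.
 */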
4112 {
4113 "invalid direct packet write for LWT_IN",
4114 .insns = {
4115 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4116 offsetof(struct __sk_buff, data)),
4117 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4118 offsetof(struct __sk_buff, data_end)),
4119 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4120 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4121 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4122 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4123 BPF_MOV64_IMM(BPF_REG_0, 0),
4124 BPF_EXIT_INSN(),
4125 },
4126 .errstr = "cannot write into packet",
4127 .result = REJECT,
4128 .prog_type = BPF_PROG_TYPE_LWT_IN,
4129 },
4130 {
4131 "invalid direct packet write for LWT_OUT",
4132 .insns = {
4133 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4134 offsetof(struct __sk_buff, data)),
4135 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4136 offsetof(struct __sk_buff, data_end)),
4137 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4139 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4140 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4141 BPF_MOV64_IMM(BPF_REG_0, 0),
4142 BPF_EXIT_INSN(),
4143 },
4144 .errstr = "cannot write into packet",
4145 .result = REJECT,
4146 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4147 },
4148 {
4149 "direct packet write for LWT_XMIT",
4150 .insns = {
4151 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4152 offsetof(struct __sk_buff, data)),
4153 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4154 offsetof(struct __sk_buff, data_end)),
4155 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4156 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4157 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4158 BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
4159 BPF_MOV64_IMM(BPF_REG_0, 0),
4160 BPF_EXIT_INSN(),
4161 },
4162 .result = ACCEPT,
4163 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4164 },
4165 {
4166 "direct packet read for LWT_IN",
4167 .insns = {
4168 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4169 offsetof(struct __sk_buff, data)),
4170 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4171 offsetof(struct __sk_buff, data_end)),
4172 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4174 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4175 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4176 BPF_MOV64_IMM(BPF_REG_0, 0),
4177 BPF_EXIT_INSN(),
4178 },
4179 .result = ACCEPT,
4180 .prog_type = BPF_PROG_TYPE_LWT_IN,
4181 },
4182 {
4183 "direct packet read for LWT_OUT",
4184 .insns = {
4185 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4186 offsetof(struct __sk_buff, data)),
4187 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4188 offsetof(struct __sk_buff, data_end)),
4189 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4191 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4192 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4193 BPF_MOV64_IMM(BPF_REG_0, 0),
4194 BPF_EXIT_INSN(),
4195 },
4196 .result = ACCEPT,
4197 .prog_type = BPF_PROG_TYPE_LWT_OUT,
4198 },
4199 {
4200 "direct packet read for LWT_XMIT",
4201 .insns = {
4202 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4203 offsetof(struct __sk_buff, data)),
4204 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4205 offsetof(struct __sk_buff, data_end)),
4206 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4207 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4208 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
4209 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
4210 BPF_MOV64_IMM(BPF_REG_0, 0),
4211 BPF_EXIT_INSN(),
4212 },
4213 .result = ACCEPT,
4214 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4215 },
4216 {
4217 "overlapping checks for direct packet access",
4218 .insns = {
4219 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
4220 offsetof(struct __sk_buff, data)),
4221 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
4222 offsetof(struct __sk_buff, data_end)),
4223 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
4224 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
4225 BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
4226 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
4227 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
4228 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
4229 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
4230 BPF_MOV64_IMM(BPF_REG_0, 0),
4231 BPF_EXIT_INSN(),
4232 },
4233 .result = ACCEPT,
4234 .prog_type = BPF_PROG_TYPE_LWT_XMIT,
4235 },
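/* skb->tc_classid is a tc-specific field; the three tests below expect
 * a load of it to be flagged as an invalid bpf_context access when the
 * program is not a classifier.
 */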
4236 {
4237 "invalid access of tc_classid for LWT_IN",
4238 .insns = {
4239 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4240 offsetof(struct __sk_buff, tc_classid)),
4241 BPF_EXIT_INSN(),
4242 },
4243 .result = REJECT,
4244 .errstr = "invalid bpf_context access",
4245 },
4246 {
4247 "invalid access of tc_classid for LWT_OUT",
4248 .insns = {
4249 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4250 offsetof(struct __sk_buff, tc_classid)),
4251 BPF_EXIT_INSN(),
4252 },
4253 .result = REJECT,
4254 .errstr = "invalid bpf_context access",
4255 },
4256 {
4257 "invalid access of tc_classid for LWT_XMIT",
4258 .insns = {
4259 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
4260 offsetof(struct __sk_buff, tc_classid)),
4261 BPF_EXIT_INSN(),
4262 },
4263 .result = REJECT,
4264 .errstr = "invalid bpf_context access",
4265 },
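/* Pointer leak checks: storing or xadd-ing a map pointer, the frame
 * pointer or the context pointer into skb->cb[] or into a map value is
 * rejected for unprivileged loads ("leaks addr") but accepted for
 * privileged ones.
 */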
4266 {
4267 "leak pointer into ctx 1",
4268 .insns = {
4269 BPF_MOV64_IMM(BPF_REG_0, 0),
4270 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4271 offsetof(struct __sk_buff, cb[0])),
4272 BPF_LD_MAP_FD(BPF_REG_2, 0),
4273 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_2,
4274 offsetof(struct __sk_buff, cb[0])),
4275 BPF_EXIT_INSN(),
4276 },
4277 .fixup_map1 = { 2 },
4278 .errstr_unpriv = "R2 leaks addr into mem",
4279 .result_unpriv = REJECT,
4280 .result = ACCEPT,
4281 },
4282 {
4283 "leak pointer into ctx 2",
4284 .insns = {
4285 BPF_MOV64_IMM(BPF_REG_0, 0),
4286 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
4287 offsetof(struct __sk_buff, cb[0])),
4288 BPF_STX_XADD(BPF_DW, BPF_REG_1, BPF_REG_10,
4289 offsetof(struct __sk_buff, cb[0])),
4290 BPF_EXIT_INSN(),
4291 },
4292 .errstr_unpriv = "R10 leaks addr into mem",
4293 .result_unpriv = REJECT,
4294 .result = ACCEPT,
4295 },
4296 {
4297 "leak pointer into ctx 3",
4298 .insns = {
4299 BPF_MOV64_IMM(BPF_REG_0, 0),
4300 BPF_LD_MAP_FD(BPF_REG_2, 0),
4301 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
4302 offsetof(struct __sk_buff, cb[0])),
4303 BPF_EXIT_INSN(),
4304 },
4305 .fixup_map1 = { 1 },
4306 .errstr_unpriv = "R2 leaks addr into ctx",
4307 .result_unpriv = REJECT,
4308 .result = ACCEPT,
4309 },
4310 {
4311 "leak pointer into map val",
4312 .insns = {
4313 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
4314 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
4315 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4316 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4317 BPF_LD_MAP_FD(BPF_REG_1, 0),
4318 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4319 BPF_FUNC_map_lookup_elem),
4320 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
4321 BPF_MOV64_IMM(BPF_REG_3, 0),
4322 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
4323 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
4324 BPF_MOV64_IMM(BPF_REG_0, 0),
4325 BPF_EXIT_INSN(),
4326 },
4327 .fixup_map1 = { 4 },
4328 .errstr_unpriv = "R6 leaks addr into mem",
4329 .result_unpriv = REJECT,
4330 .result = ACCEPT,
4331 },
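/* Map value as helper memory argument: the looked-up value is passed to
 * bpf_probe_read() as the destination buffer with sizes covering the
 * whole value, part of it, zero, past the end, or a negative length;
 * only the first two may be accepted. Roughly the C shape of the
 * accepted case:
 *
 *	struct test_val *val = bpf_map_lookup_elem(&map, &key);
 *	if (val)
 *		bpf_probe_read(val, sizeof(*val), NULL);
 */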
4332 {
4333 "helper access to map: full range",
4334 .insns = {
4335 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4336 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4337 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4338 BPF_LD_MAP_FD(BPF_REG_1, 0),
4339 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4340 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4341 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4342 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
4343 BPF_MOV64_IMM(BPF_REG_3, 0),
4344 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4345 BPF_EXIT_INSN(),
4346 },
4347 .fixup_map2 = { 3 },
4348 .result = ACCEPT,
4349 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4350 },
4351 {
4352 "helper access to map: partial range",
4353 .insns = {
4354 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4355 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4356 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4357 BPF_LD_MAP_FD(BPF_REG_1, 0),
4358 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4359 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4360 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4361 BPF_MOV64_IMM(BPF_REG_2, 8),
4362 BPF_MOV64_IMM(BPF_REG_3, 0),
4363 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4364 BPF_EXIT_INSN(),
4365 },
4366 .fixup_map2 = { 3 },
4367 .result = ACCEPT,
4368 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4369 },
4370 {
4371 "helper access to map: empty range",
4372 .insns = {
4373 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4374 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4375 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4376 BPF_LD_MAP_FD(BPF_REG_1, 0),
4377 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4378 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4379 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4380 BPF_MOV64_IMM(BPF_REG_2, 0),
4381 BPF_MOV64_IMM(BPF_REG_3, 0),
4382 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4383 BPF_EXIT_INSN(),
4384 },
4385 .fixup_map2 = { 3 },
4386 .errstr = "invalid access to map value, value_size=48 off=0 size=0",
4387 .result = REJECT,
4388 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4389 },
4390 {
4391 "helper access to map: out-of-bound range",
4392 .insns = {
4393 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4394 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4395 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4396 BPF_LD_MAP_FD(BPF_REG_1, 0),
4397 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4398 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4399 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4400 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
4401 BPF_MOV64_IMM(BPF_REG_3, 0),
4402 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4403 BPF_EXIT_INSN(),
4404 },
4405 .fixup_map2 = { 3 },
4406 .errstr = "invalid access to map value, value_size=48 off=0 size=56",
4407 .result = REJECT,
4408 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4409 },
4410 {
4411 "helper access to map: negative range",
4412 .insns = {
4413 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4414 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4415 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4416 BPF_LD_MAP_FD(BPF_REG_1, 0),
4417 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4418 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4420 BPF_MOV64_IMM(BPF_REG_2, -8),
4421 BPF_MOV64_IMM(BPF_REG_3, 0),
4422 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4423 BPF_EXIT_INSN(),
4424 },
4425 .fixup_map2 = { 3 },
4426 .errstr = "R2 min value is negative",
4427 .result = REJECT,
4428 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4429 },
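/* Same checks, but the map value pointer is first advanced by the
 * constant immediate offsetof(struct test_val, foo), so the space the
 * helper may still touch shrinks by that offset.
 */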
4430 {
4431 "helper access to adjusted map (via const imm): full range",
4432 .insns = {
4433 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4435 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4436 BPF_LD_MAP_FD(BPF_REG_1, 0),
4437 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4438 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4439 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4440 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4441 offsetof(struct test_val, foo)),
4442 BPF_MOV64_IMM(BPF_REG_2,
4443 sizeof(struct test_val) -
4444 offsetof(struct test_val, foo)),
4445 BPF_MOV64_IMM(BPF_REG_3, 0),
4446 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4447 BPF_EXIT_INSN(),
4448 },
4449 .fixup_map2 = { 3 },
4450 .result = ACCEPT,
4451 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4452 },
4453 {
4454 "helper access to adjusted map (via const imm): partial range",
4455 .insns = {
4456 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4458 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4459 BPF_LD_MAP_FD(BPF_REG_1, 0),
4460 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4462 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4463 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4464 offsetof(struct test_val, foo)),
4465 BPF_MOV64_IMM(BPF_REG_2, 8),
4466 BPF_MOV64_IMM(BPF_REG_3, 0),
4467 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4468 BPF_EXIT_INSN(),
4469 },
4470 .fixup_map2 = { 3 },
4471 .result = ACCEPT,
4472 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4473 },
4474 {
4475 "helper access to adjusted map (via const imm): empty range",
4476 .insns = {
4477 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4478 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4479 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4480 BPF_LD_MAP_FD(BPF_REG_1, 0),
4481 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4482 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4483 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4484 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4485 offsetof(struct test_val, foo)),
4486 BPF_MOV64_IMM(BPF_REG_2, 0),
4487 BPF_MOV64_IMM(BPF_REG_3, 0),
4488 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4489 BPF_EXIT_INSN(),
4490 },
4491 .fixup_map2 = { 3 },
4492 .errstr = "invalid access to map value, value_size=48 off=4 size=0",
4493 .result = REJECT,
4494 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4495 },
4496 {
4497 "helper access to adjusted map (via const imm): out-of-bound range",
4498 .insns = {
4499 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4500 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4501 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4502 BPF_LD_MAP_FD(BPF_REG_1, 0),
4503 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4504 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4505 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4506 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4507 offsetof(struct test_val, foo)),
4508 BPF_MOV64_IMM(BPF_REG_2,
4509 sizeof(struct test_val) -
4510 offsetof(struct test_val, foo) + 8),
4511 BPF_MOV64_IMM(BPF_REG_3, 0),
4512 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4513 BPF_EXIT_INSN(),
4514 },
4515 .fixup_map2 = { 3 },
4516 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4517 .result = REJECT,
4518 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4519 },
4520 {
4521 "helper access to adjusted map (via const imm): negative range (> adjustment)",
4522 .insns = {
4523 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4524 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4525 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4526 BPF_LD_MAP_FD(BPF_REG_1, 0),
4527 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4528 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4531 offsetof(struct test_val, foo)),
4532 BPF_MOV64_IMM(BPF_REG_2, -8),
4533 BPF_MOV64_IMM(BPF_REG_3, 0),
4534 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4535 BPF_EXIT_INSN(),
4536 },
4537 .fixup_map2 = { 3 },
4538 .errstr = "R2 min value is negative",
4539 .result = REJECT,
4540 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4541 },
4542 {
4543 "helper access to adjusted map (via const imm): negative range (< adjustment)",
4544 .insns = {
4545 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4546 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4547 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4548 BPF_LD_MAP_FD(BPF_REG_1, 0),
4549 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4550 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
4551 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4552 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
4553 offsetof(struct test_val, foo)),
4554 BPF_MOV64_IMM(BPF_REG_2, -1),
4555 BPF_MOV64_IMM(BPF_REG_3, 0),
4556 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4557 BPF_EXIT_INSN(),
4558 },
4559 .fixup_map2 = { 3 },
4560 .errstr = "R2 min value is negative",
4561 .result = REJECT,
4562 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4563 },
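/* As above, except the constant offset is added via a register; the
 * verifier must track that register's known value just as precisely as
 * an immediate.
 */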
4564 {
4565 "helper access to adjusted map (via const reg): full range",
4566 .insns = {
4567 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4569 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4570 BPF_LD_MAP_FD(BPF_REG_1, 0),
4571 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4572 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4573 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4574 BPF_MOV64_IMM(BPF_REG_3,
4575 offsetof(struct test_val, foo)),
4576 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4577 BPF_MOV64_IMM(BPF_REG_2,
4578 sizeof(struct test_val) -
4579 offsetof(struct test_val, foo)),
4580 BPF_MOV64_IMM(BPF_REG_3, 0),
4581 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4582 BPF_EXIT_INSN(),
4583 },
4584 .fixup_map2 = { 3 },
4585 .result = ACCEPT,
4586 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4587 },
4588 {
4589 "helper access to adjusted map (via const reg): partial range",
4590 .insns = {
4591 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4592 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4593 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4594 BPF_LD_MAP_FD(BPF_REG_1, 0),
4595 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4596 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4597 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4598 BPF_MOV64_IMM(BPF_REG_3,
4599 offsetof(struct test_val, foo)),
4600 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4601 BPF_MOV64_IMM(BPF_REG_2, 8),
4602 BPF_MOV64_IMM(BPF_REG_3, 0),
4603 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4604 BPF_EXIT_INSN(),
4605 },
4606 .fixup_map2 = { 3 },
4607 .result = ACCEPT,
4608 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4609 },
4610 {
4611 "helper access to adjusted map (via const reg): empty range",
4612 .insns = {
4613 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4614 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4615 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4616 BPF_LD_MAP_FD(BPF_REG_1, 0),
4617 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4618 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4619 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4620 BPF_MOV64_IMM(BPF_REG_3, 0),
4621 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4622 BPF_MOV64_IMM(BPF_REG_2, 0),
4623 BPF_MOV64_IMM(BPF_REG_3, 0),
4624 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4625 BPF_EXIT_INSN(),
4626 },
4627 .fixup_map2 = { 3 },
4628 .errstr = "R1 min value is outside of the array range",
4629 .result = REJECT,
4630 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4631 },
4632 {
4633 "helper access to adjusted map (via const reg): out-of-bound range",
4634 .insns = {
4635 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4636 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4637 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4638 BPF_LD_MAP_FD(BPF_REG_1, 0),
4639 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4640 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4642 BPF_MOV64_IMM(BPF_REG_3,
4643 offsetof(struct test_val, foo)),
4644 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4645 BPF_MOV64_IMM(BPF_REG_2,
4646 sizeof(struct test_val) -
4647 offsetof(struct test_val, foo) + 8),
4648 BPF_MOV64_IMM(BPF_REG_3, 0),
4649 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4650 BPF_EXIT_INSN(),
4651 },
4652 .fixup_map2 = { 3 },
4653 .errstr = "invalid access to map value, value_size=48 off=4 size=52",
4654 .result = REJECT,
4655 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4656 },
4657 {
4658 "helper access to adjusted map (via const reg): negative range (> adjustment)",
4659 .insns = {
4660 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4661 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4662 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4663 BPF_LD_MAP_FD(BPF_REG_1, 0),
4664 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4665 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4666 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4667 BPF_MOV64_IMM(BPF_REG_3,
4668 offsetof(struct test_val, foo)),
4669 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4670 BPF_MOV64_IMM(BPF_REG_2, -8),
4671 BPF_MOV64_IMM(BPF_REG_3, 0),
4672 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4673 BPF_EXIT_INSN(),
4674 },
4675 .fixup_map2 = { 3 },
4676 .errstr = "R2 min value is negative",
4677 .result = REJECT,
4678 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4679 },
4680 {
4681 "helper access to adjusted map (via const reg): negative range (< adjustment)",
4682 .insns = {
4683 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4684 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4685 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4686 BPF_LD_MAP_FD(BPF_REG_1, 0),
4687 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4688 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4689 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4690 BPF_MOV64_IMM(BPF_REG_3,
4691 offsetof(struct test_val, foo)),
4692 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4693 BPF_MOV64_IMM(BPF_REG_2, -1),
4694 BPF_MOV64_IMM(BPF_REG_3, 0),
4695 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4696 BPF_EXIT_INSN(),
4697 },
4698 .fixup_map2 = { 3 },
4699 .errstr = "R2 min value is negative",
4700 .result = REJECT,
4701 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4702 },
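/* Here the offset added to the map value is itself loaded from the map,
 * so it is only usable after an upper-bound check; a missing or
 * too-loose bound must leave the access rejected as unbounded or out of
 * the value range.
 */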
4703 {
4704 "helper access to adjusted map (via variable): full range",
4705 .insns = {
4706 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4707 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4708 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4709 BPF_LD_MAP_FD(BPF_REG_1, 0),
4710 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4711 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4712 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4713 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4714 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4715 offsetof(struct test_val, foo), 4),
4716 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4717 BPF_MOV64_IMM(BPF_REG_2,
4718 sizeof(struct test_val) -
4719 offsetof(struct test_val, foo)),
4720 BPF_MOV64_IMM(BPF_REG_3, 0),
4721 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4722 BPF_EXIT_INSN(),
4723 },
4724 .fixup_map2 = { 3 },
4725 .result = ACCEPT,
4726 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4727 },
4728 {
4729 "helper access to adjusted map (via variable): partial range",
4730 .insns = {
4731 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4732 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4733 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4734 BPF_LD_MAP_FD(BPF_REG_1, 0),
4735 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4736 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4737 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4738 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4739 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4740 offsetof(struct test_val, foo), 4),
4741 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4742 BPF_MOV64_IMM(BPF_REG_2, 8),
4743 BPF_MOV64_IMM(BPF_REG_3, 0),
4744 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4745 BPF_EXIT_INSN(),
4746 },
4747 .fixup_map2 = { 3 },
4748 .result = ACCEPT,
4749 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4750 },
4751 {
4752 "helper access to adjusted map (via variable): empty range",
4753 .insns = {
4754 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4755 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4756 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4757 BPF_LD_MAP_FD(BPF_REG_1, 0),
4758 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4759 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4760 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4761 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4762 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4763 offsetof(struct test_val, foo), 4),
4764 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4765 BPF_MOV64_IMM(BPF_REG_2, 0),
4766 BPF_MOV64_IMM(BPF_REG_3, 0),
4767 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4768 BPF_EXIT_INSN(),
4769 },
4770 .fixup_map2 = { 3 },
4771 .errstr = "R1 min value is outside of the array range",
4772 .result = REJECT,
4773 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4774 },
4775 {
4776 "helper access to adjusted map (via variable): no max check",
4777 .insns = {
4778 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4779 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4780 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4781 BPF_LD_MAP_FD(BPF_REG_1, 0),
4782 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4783 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
4784 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4785 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4786 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4787 BPF_MOV64_IMM(BPF_REG_2, 1),
4788 BPF_MOV64_IMM(BPF_REG_3, 0),
4789 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4790 BPF_EXIT_INSN(),
4791 },
4792 .fixup_map2 = { 3 },
4793 .errstr = "R1 unbounded memory access",
4794 .result = REJECT,
4795 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4796 },
4797 {
4798 "helper access to adjusted map (via variable): wrong max check",
4799 .insns = {
4800 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4801 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4802 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4803 BPF_LD_MAP_FD(BPF_REG_1, 0),
4804 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4805 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
4806 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4807 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4808 BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
4809 offsetof(struct test_val, foo), 4),
4810 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4811 BPF_MOV64_IMM(BPF_REG_2,
4812 sizeof(struct test_val) -
4813 offsetof(struct test_val, foo) + 1),
4814 BPF_MOV64_IMM(BPF_REG_3, 0),
4815 BPF_EMIT_CALL(BPF_FUNC_probe_read),
4816 BPF_EXIT_INSN(),
4817 },
4818 .fixup_map2 = { 3 },
4819 .errstr = "invalid access to map value, value_size=48 off=4 size=45",
4820 .result = REJECT,
4821 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4822 },
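/* Bounds checks expressed with the JLT/JLE/JSLT/JSLE comparisons: the
 * good-access variants end up with an index provably within [0, 31]
 * before it is added to the value pointer, while the bad variants leave
 * it unbounded or possibly negative. Roughly:
 *
 *	idx = *(u32 *)val;
 *	if (idx < 32)
 *		*((u8 *)val + idx) = 0;
 */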
4823 {
4824 "helper access to map: bounds check using <, good access",
4825 .insns = {
4826 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4828 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4829 BPF_LD_MAP_FD(BPF_REG_1, 0),
4830 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4831 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4833 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4834 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2),
4835 BPF_MOV64_IMM(BPF_REG_0, 0),
4836 BPF_EXIT_INSN(),
4837 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4838 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4839 BPF_MOV64_IMM(BPF_REG_0, 0),
4840 BPF_EXIT_INSN(),
4841 },
4842 .fixup_map2 = { 3 },
4843 .result = ACCEPT,
4844 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4845 },
4846 {
4847 "helper access to map: bounds check using <, bad access",
4848 .insns = {
4849 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4850 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4851 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4852 BPF_LD_MAP_FD(BPF_REG_1, 0),
4853 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4854 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4855 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4856 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4857 BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4),
4858 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4859 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4860 BPF_MOV64_IMM(BPF_REG_0, 0),
4861 BPF_EXIT_INSN(),
4862 BPF_MOV64_IMM(BPF_REG_0, 0),
4863 BPF_EXIT_INSN(),
4864 },
4865 .fixup_map2 = { 3 },
4866 .result = REJECT,
4867 .errstr = "R1 unbounded memory access",
4868 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4869 },
4870 {
4871 "helper access to map: bounds check using <=, good access",
4872 .insns = {
4873 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4874 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4875 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4876 BPF_LD_MAP_FD(BPF_REG_1, 0),
4877 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4878 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4879 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4880 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4881 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2),
4882 BPF_MOV64_IMM(BPF_REG_0, 0),
4883 BPF_EXIT_INSN(),
4884 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4885 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4886 BPF_MOV64_IMM(BPF_REG_0, 0),
4887 BPF_EXIT_INSN(),
4888 },
4889 .fixup_map2 = { 3 },
4890 .result = ACCEPT,
4891 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4892 },
4893 {
4894 "helper access to map: bounds check using <=, bad access",
4895 .insns = {
4896 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4897 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4898 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4899 BPF_LD_MAP_FD(BPF_REG_1, 0),
4900 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4901 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4902 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4903 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4904 BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4),
4905 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4906 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4907 BPF_MOV64_IMM(BPF_REG_0, 0),
4908 BPF_EXIT_INSN(),
4909 BPF_MOV64_IMM(BPF_REG_0, 0),
4910 BPF_EXIT_INSN(),
4911 },
4912 .fixup_map2 = { 3 },
4913 .result = REJECT,
4914 .errstr = "R1 unbounded memory access",
4915 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4916 },
4917 {
4918 "helper access to map: bounds check using s<, good access",
4919 .insns = {
4920 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4921 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4922 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4923 BPF_LD_MAP_FD(BPF_REG_1, 0),
4924 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4925 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4926 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4927 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4928 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4929 BPF_MOV64_IMM(BPF_REG_0, 0),
4930 BPF_EXIT_INSN(),
4931 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3),
4932 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4933 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4934 BPF_MOV64_IMM(BPF_REG_0, 0),
4935 BPF_EXIT_INSN(),
4936 },
4937 .fixup_map2 = { 3 },
4938 .result = ACCEPT,
4939 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4940 },
4941 {
4942 "helper access to map: bounds check using s<, good access 2",
4943 .insns = {
4944 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4945 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4946 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4947 BPF_LD_MAP_FD(BPF_REG_1, 0),
4948 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4949 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4950 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4951 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
4952 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4953 BPF_MOV64_IMM(BPF_REG_0, 0),
4954 BPF_EXIT_INSN(),
4955 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4956 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4957 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4958 BPF_MOV64_IMM(BPF_REG_0, 0),
4959 BPF_EXIT_INSN(),
4960 },
4961 .fixup_map2 = { 3 },
4962 .result = ACCEPT,
4963 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4964 },
4965 {
4966 "helper access to map: bounds check using s<, bad access",
4967 .insns = {
4968 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4970 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4971 BPF_LD_MAP_FD(BPF_REG_1, 0),
4972 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4973 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4974 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
4975 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
4976 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2),
4977 BPF_MOV64_IMM(BPF_REG_0, 0),
4978 BPF_EXIT_INSN(),
4979 BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3),
4980 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
4981 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
4982 BPF_MOV64_IMM(BPF_REG_0, 0),
4983 BPF_EXIT_INSN(),
4984 },
4985 .fixup_map2 = { 3 },
4986 .result = REJECT,
4987 .errstr = "R1 min value is negative",
4988 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
4989 },
4990 {
4991 "helper access to map: bounds check using s<=, good access",
4992 .insns = {
4993 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
4994 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
4995 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
4996 BPF_LD_MAP_FD(BPF_REG_1, 0),
4997 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
4998 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
4999 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5000 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5001 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5002 BPF_MOV64_IMM(BPF_REG_0, 0),
5003 BPF_EXIT_INSN(),
5004 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3),
5005 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5006 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5007 BPF_MOV64_IMM(BPF_REG_0, 0),
5008 BPF_EXIT_INSN(),
5009 },
5010 .fixup_map2 = { 3 },
5011 .result = ACCEPT,
5012 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5013 },
5014 {
5015 "helper access to map: bounds check using s<=, good access 2",
5016 .insns = {
5017 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5018 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5019 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5020 BPF_LD_MAP_FD(BPF_REG_1, 0),
5021 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5022 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5023 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5024 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
5025 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5026 BPF_MOV64_IMM(BPF_REG_0, 0),
5027 BPF_EXIT_INSN(),
5028 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5029 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5030 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5031 BPF_MOV64_IMM(BPF_REG_0, 0),
5032 BPF_EXIT_INSN(),
5033 },
5034 .fixup_map2 = { 3 },
5035 .result = ACCEPT,
5036 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5037 },
5038 {
5039 "helper access to map: bounds check using s<=, bad access",
5040 .insns = {
5041 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5042 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5043 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5044 BPF_LD_MAP_FD(BPF_REG_1, 0),
5045 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5046 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5047 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5048 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
5049 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2),
5050 BPF_MOV64_IMM(BPF_REG_0, 0),
5051 BPF_EXIT_INSN(),
5052 BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3),
5053 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
5054 BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0),
5055 BPF_MOV64_IMM(BPF_REG_0, 0),
5056 BPF_EXIT_INSN(),
5057 },
5058 .fixup_map2 = { 3 },
5059 .result = REJECT,
5060 .errstr = "R1 min value is negative",
5061 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5062 },
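/* Spill/fill of a map value pointer through the stack must preserve its
 * type and bounds (including the not-yet-checked or-null state), helper
 * calls leave the caller-saved R1 unreadable afterwards, and unaligned
 * stores/loads within the value are still subject to alignment checks.
 */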
5063 {
5064 "map element value is preserved across register spilling",
5065 .insns = {
5066 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5067 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5068 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5069 BPF_LD_MAP_FD(BPF_REG_1, 0),
5070 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5071 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5072 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5073 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5074 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5075 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5076 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5077 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5078 BPF_EXIT_INSN(),
5079 },
5080 .fixup_map2 = { 3 },
5081 .errstr_unpriv = "R0 leaks addr",
5082 .result = ACCEPT,
5083 .result_unpriv = REJECT,
5084 },
5085 {
5086 "map element value or null is marked on register spilling",
5087 .insns = {
5088 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5089 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5090 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5091 BPF_LD_MAP_FD(BPF_REG_1, 0),
5092 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5093 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
5095 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5096 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5097 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5098 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5099 BPF_EXIT_INSN(),
5100 },
5101 .fixup_map2 = { 3 },
5102 .errstr_unpriv = "R0 leaks addr",
5103 .result = ACCEPT,
5104 .result_unpriv = REJECT,
5105 },
5106 {
5107 "map element value store of cleared call register",
5108 .insns = {
5109 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5110 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5111 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5112 BPF_LD_MAP_FD(BPF_REG_1, 0),
5113 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5114 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
5115 BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
5116 BPF_EXIT_INSN(),
5117 },
5118 .fixup_map2 = { 3 },
5119 .errstr_unpriv = "R1 !read_ok",
5120 .errstr = "R1 !read_ok",
5121 .result = REJECT,
5122 .result_unpriv = REJECT,
5123 },
5124 {
5125 "map element value with unaligned store",
5126 .insns = {
5127 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5128 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5129 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5130 BPF_LD_MAP_FD(BPF_REG_1, 0),
5131 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5132 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
5133 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5134 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5135 BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
5136 BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
5137 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5138 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
5139 BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
5140 BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
5141 BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
5142 BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
5143 BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
5144 BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
5145 BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
5146 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
5147 BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
5148 BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
5149 BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
5150 BPF_EXIT_INSN(),
5151 },
5152 .fixup_map2 = { 3 },
5153 .errstr_unpriv = "R0 leaks addr",
5154 .result = ACCEPT,
5155 .result_unpriv = REJECT,
5156 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5157 },
5158 {
5159 "map element value with unaligned load",
5160 .insns = {
5161 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5162 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5163 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5164 BPF_LD_MAP_FD(BPF_REG_1, 0),
5165 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5166 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5167 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5168 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
5169 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
5170 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5171 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
5172 BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
5173 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
5174 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
5175 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
5176 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
5177 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
5178 BPF_EXIT_INSN(),
5179 },
5180 .fixup_map2 = { 3 },
5181 .errstr_unpriv = "R0 leaks addr",
5182 .result = ACCEPT,
5183 .result_unpriv = REJECT,
5184 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5185 },
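/* ALU operations that make no sense on a pointer (AND with a constant,
 * a 32-bit add, a divide, a byte swap, an atomic add to the spilled
 * pointer) turn the map value pointer into an unknown scalar ('inv'),
 * so the following store must be rejected; unprivileged loads are
 * refused at the offending operation itself.
 */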
5186 {
5187 "map element value illegal alu op, 1",
5188 .insns = {
5189 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5190 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5191 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5192 BPF_LD_MAP_FD(BPF_REG_1, 0),
5193 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5194 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5195 BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
5196 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5197 BPF_EXIT_INSN(),
5198 },
5199 .fixup_map2 = { 3 },
5200 .errstr_unpriv = "R0 bitwise operator &= on pointer",
5201 .errstr = "invalid mem access 'inv'",
5202 .result = REJECT,
5203 .result_unpriv = REJECT,
5204 },
5205 {
5206 "map element value illegal alu op, 2",
5207 .insns = {
5208 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5209 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5210 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5211 BPF_LD_MAP_FD(BPF_REG_1, 0),
5212 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5213 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5214 BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
5215 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5216 BPF_EXIT_INSN(),
5217 },
5218 .fixup_map2 = { 3 },
5219 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
5220 .errstr = "invalid mem access 'inv'",
5221 .result = REJECT,
5222 .result_unpriv = REJECT,
5223 },
5224 {
5225 "map element value illegal alu op, 3",
5226 .insns = {
5227 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5228 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5229 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5230 BPF_LD_MAP_FD(BPF_REG_1, 0),
5231 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5232 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5233 BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
5234 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5235 BPF_EXIT_INSN(),
5236 },
5237 .fixup_map2 = { 3 },
5238 .errstr_unpriv = "R0 pointer arithmetic with /= operator",
5239 .errstr = "invalid mem access 'inv'",
5240 .result = REJECT,
5241 .result_unpriv = REJECT,
5242 },
5243 {
5244 "map element value illegal alu op, 4",
5245 .insns = {
5246 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5247 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5248 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5249 BPF_LD_MAP_FD(BPF_REG_1, 0),
5250 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5251 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
5252 BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
5253 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5254 BPF_EXIT_INSN(),
5255 },
5256 .fixup_map2 = { 3 },
5257 .errstr_unpriv = "R0 pointer arithmetic prohibited",
5258 .errstr = "invalid mem access 'inv'",
5259 .result = REJECT,
5260 .result_unpriv = REJECT,
5261 },
5262 {
5263 "map element value illegal alu op, 5",
5264 .insns = {
5265 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5266 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5267 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5268 BPF_LD_MAP_FD(BPF_REG_1, 0),
5269 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5270 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5271 BPF_MOV64_IMM(BPF_REG_3, 4096),
5272 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5274 BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
5275 BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
5276 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
5277 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
5278 BPF_EXIT_INSN(),
5279 },
5280 .fixup_map2 = { 3 },
5281 .errstr = "R0 invalid mem access 'inv'",
5282 .result = REJECT,
5283 },
5284 {
5285 "map element value is preserved across register spilling",
5286 .insns = {
5287 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5288 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5289 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5290 BPF_LD_MAP_FD(BPF_REG_1, 0),
5291 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5292 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
5293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
5294 offsetof(struct test_val, foo)),
5295 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
5296 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5297 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
5298 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
5299 BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
5300 BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
5301 BPF_EXIT_INSN(),
5302 },
5303 .fixup_map2 = { 3 },
5304 .errstr_unpriv = "R0 leaks addr",
5305 .result = ACCEPT,
5306 .result_unpriv = REJECT,
5307 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5308 },
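/* "helper access to variable memory" tests: the size register handed to
 * bpf_probe_read() must carry bounds the verifier can prove, established
 * either by masking with AND or by conditional jumps.  A missing min or
 * max check, or bounds larger than the backing stack area, must be
 * rejected.
 */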
5309 {
5310 "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
5311 .insns = {
5312 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5313 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5314 BPF_MOV64_IMM(BPF_REG_0, 0),
5315 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5316 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5317 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5318 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5319 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5320 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5321 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5322 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5323 BPF_MOV64_IMM(BPF_REG_2, 16),
5324 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5325 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5326 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5327 BPF_MOV64_IMM(BPF_REG_4, 0),
5328 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5329 BPF_MOV64_IMM(BPF_REG_3, 0),
5330 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5331 BPF_MOV64_IMM(BPF_REG_0, 0),
5332 BPF_EXIT_INSN(),
5333 },
5334 .result = ACCEPT,
5335 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5336 },
5337 {
5338 "helper access to variable memory: stack, bitwise AND, zero included",
5339 .insns = {
5340 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5341 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5342 BPF_MOV64_IMM(BPF_REG_2, 16),
5343 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5344 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5345 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5346 BPF_MOV64_IMM(BPF_REG_3, 0),
5347 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5348 BPF_EXIT_INSN(),
5349 },
5350 .errstr = "invalid stack type R1 off=-64 access_size=0",
5351 .result = REJECT,
5352 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5353 },
5354 {
5355 "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
5356 .insns = {
5357 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5358 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5359 BPF_MOV64_IMM(BPF_REG_2, 16),
5360 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5361 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5362 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
5363 BPF_MOV64_IMM(BPF_REG_4, 0),
5364 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5365 BPF_MOV64_IMM(BPF_REG_3, 0),
5366 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5367 BPF_MOV64_IMM(BPF_REG_0, 0),
5368 BPF_EXIT_INSN(),
5369 },
5370 .errstr = "invalid stack type R1 off=-64 access_size=65",
5371 .result = REJECT,
5372 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5373 },
5374 {
5375 "helper access to variable memory: stack, JMP, correct bounds",
5376 .insns = {
5377 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5378 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5379 BPF_MOV64_IMM(BPF_REG_0, 0),
5380 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5381 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5382 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5383 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5384 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5385 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5386 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5387 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5388 BPF_MOV64_IMM(BPF_REG_2, 16),
5389 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5390 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5391 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
5392 BPF_MOV64_IMM(BPF_REG_4, 0),
5393 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5394 BPF_MOV64_IMM(BPF_REG_3, 0),
5395 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5396 BPF_MOV64_IMM(BPF_REG_0, 0),
5397 BPF_EXIT_INSN(),
5398 },
5399 .result = ACCEPT,
5400 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5401 },
5402 {
5403 "helper access to variable memory: stack, JMP (signed), correct bounds",
5404 .insns = {
5405 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5406 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5407 BPF_MOV64_IMM(BPF_REG_0, 0),
5408 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5409 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5410 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5411 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5412 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5413 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5414 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5415 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5416 BPF_MOV64_IMM(BPF_REG_2, 16),
5417 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5418 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5419 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
5420 BPF_MOV64_IMM(BPF_REG_4, 0),
5421 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5422 BPF_MOV64_IMM(BPF_REG_3, 0),
5423 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5424 BPF_MOV64_IMM(BPF_REG_0, 0),
5425 BPF_EXIT_INSN(),
5426 },
5427 .result = ACCEPT,
5428 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5429 },
5430 {
5431 "helper access to variable memory: stack, JMP, bounds + offset",
5432 .insns = {
5433 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5434 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5435 BPF_MOV64_IMM(BPF_REG_2, 16),
5436 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5437 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5438 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
5439 BPF_MOV64_IMM(BPF_REG_4, 0),
5440 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
5441 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5442 BPF_MOV64_IMM(BPF_REG_3, 0),
5443 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5444 BPF_MOV64_IMM(BPF_REG_0, 0),
5445 BPF_EXIT_INSN(),
5446 },
5447 .errstr = "invalid stack type R1 off=-64 access_size=65",
5448 .result = REJECT,
5449 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5450 },
5451 {
5452 "helper access to variable memory: stack, JMP, wrong max",
5453 .insns = {
5454 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5455 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5456 BPF_MOV64_IMM(BPF_REG_2, 16),
5457 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5458 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5459 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
5460 BPF_MOV64_IMM(BPF_REG_4, 0),
5461 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5462 BPF_MOV64_IMM(BPF_REG_3, 0),
5463 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5464 BPF_MOV64_IMM(BPF_REG_0, 0),
5465 BPF_EXIT_INSN(),
5466 },
5467 .errstr = "invalid stack type R1 off=-64 access_size=65",
5468 .result = REJECT,
5469 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5470 },
5471 {
5472 "helper access to variable memory: stack, JMP, no max check",
5473 .insns = {
5474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5476 BPF_MOV64_IMM(BPF_REG_2, 16),
5477 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5478 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5479 BPF_MOV64_IMM(BPF_REG_4, 0),
5480 BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
5481 BPF_MOV64_IMM(BPF_REG_3, 0),
5482 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5483 BPF_MOV64_IMM(BPF_REG_0, 0),
5484 BPF_EXIT_INSN(),
5485 },
5486 /* because max wasn't checked, signed min is negative */
5487 .errstr = "R2 min value is negative, either use unsigned or 'var &= const'",
5488 .result = REJECT,
5489 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5490 },
5491 {
5492 "helper access to variable memory: stack, JMP, no min check",
5493 .insns = {
5494 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5495 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5496 BPF_MOV64_IMM(BPF_REG_2, 16),
5497 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5498 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5499 BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
5500 BPF_MOV64_IMM(BPF_REG_3, 0),
5501 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5502 BPF_MOV64_IMM(BPF_REG_0, 0),
5503 BPF_EXIT_INSN(),
5504 },
5505 .errstr = "invalid stack type R1 off=-64 access_size=0",
5506 .result = REJECT,
5507 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5508 },
5509 {
5510 "helper access to variable memory: stack, JMP (signed), no min check",
5511 .insns = {
5512 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5513 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5514 BPF_MOV64_IMM(BPF_REG_2, 16),
5515 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
5516 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
5517 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
5518 BPF_MOV64_IMM(BPF_REG_3, 0),
5519 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5520 BPF_MOV64_IMM(BPF_REG_0, 0),
5521 BPF_EXIT_INSN(),
5522 },
5523 .errstr = "R2 min value is negative",
5524 .result = REJECT,
5525 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5526 },
5527 {
5528 "helper access to variable memory: map, JMP, correct bounds",
5529 .insns = {
5530 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5531 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5532 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5533 BPF_LD_MAP_FD(BPF_REG_1, 0),
5534 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5535 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5536 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5537 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5538 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5539 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5540 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5541 sizeof(struct test_val), 4),
5542 BPF_MOV64_IMM(BPF_REG_4, 0),
5543 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5544 BPF_MOV64_IMM(BPF_REG_3, 0),
5545 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5546 BPF_MOV64_IMM(BPF_REG_0, 0),
5547 BPF_EXIT_INSN(),
5548 },
5549 .fixup_map2 = { 3 },
5550 .result = ACCEPT,
5551 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5552 },
5553 {
5554 "helper access to variable memory: map, JMP, wrong max",
5555 .insns = {
5556 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5557 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5558 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5559 BPF_LD_MAP_FD(BPF_REG_1, 0),
5560 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5561 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
5562 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5563 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5564 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5565 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5566 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5567 sizeof(struct test_val) + 1, 4),
5568 BPF_MOV64_IMM(BPF_REG_4, 0),
5569 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5570 BPF_MOV64_IMM(BPF_REG_3, 0),
5571 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5572 BPF_MOV64_IMM(BPF_REG_0, 0),
5573 BPF_EXIT_INSN(),
5574 },
5575 .fixup_map2 = { 3 },
5576 .errstr = "invalid access to map value, value_size=48 off=0 size=49",
5577 .result = REJECT,
5578 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5579 },
5580 {
5581 "helper access to variable memory: map adjusted, JMP, correct bounds",
5582 .insns = {
5583 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5584 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5585 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5586 BPF_LD_MAP_FD(BPF_REG_1, 0),
5587 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5588 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5589 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5590 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5591 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5592 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5593 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5594 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5595 sizeof(struct test_val) - 20, 4),
5596 BPF_MOV64_IMM(BPF_REG_4, 0),
5597 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5598 BPF_MOV64_IMM(BPF_REG_3, 0),
5599 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5600 BPF_MOV64_IMM(BPF_REG_0, 0),
5601 BPF_EXIT_INSN(),
5602 },
5603 .fixup_map2 = { 3 },
5604 .result = ACCEPT,
5605 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5606 },
5607 {
5608 "helper access to variable memory: map adjusted, JMP, wrong max",
5609 .insns = {
5610 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5612 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
5613 BPF_LD_MAP_FD(BPF_REG_1, 0),
5614 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
5615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
5616 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5617 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
5618 BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
5619 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5620 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5621 BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
5622 sizeof(struct test_val) - 19, 4),
5623 BPF_MOV64_IMM(BPF_REG_4, 0),
5624 BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
5625 BPF_MOV64_IMM(BPF_REG_3, 0),
5626 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5627 BPF_MOV64_IMM(BPF_REG_0, 0),
5628 BPF_EXIT_INSN(),
5629 },
5630 .fixup_map2 = { 3 },
5631 .errstr = "R1 min value is outside of the array range",
5632 .result = REJECT,
5633 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5634 },
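/* NULL and zero-size combinations for bpf_csum_diff(): a NULL buffer is
 * only acceptable together with size 0, and a non-NULL stack buffer must
 * not be passed with size 0.  The two "8 bytes" tests check that the
 * variable-sized stack buffer given to bpf_probe_read() is fully
 * initialized (the first one leaves the slot at fp-32 untouched).
 */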
5635 {
5636 "helper access to variable memory: size = 0 allowed on NULL",
5637 .insns = {
5638 BPF_MOV64_IMM(BPF_REG_1, 0),
5639 BPF_MOV64_IMM(BPF_REG_2, 0),
5640 BPF_MOV64_IMM(BPF_REG_3, 0),
5641 BPF_MOV64_IMM(BPF_REG_4, 0),
5642 BPF_MOV64_IMM(BPF_REG_5, 0),
5643 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5644 BPF_EXIT_INSN(),
5645 },
5646 .result = ACCEPT,
5647 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5648 },
5649 {
5650 "helper access to variable memory: size > 0 not allowed on NULL",
5651 .insns = {
5652 BPF_MOV64_IMM(BPF_REG_1, 0),
5653 BPF_MOV64_IMM(BPF_REG_2, 0),
5654 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5655 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5656 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
5657 BPF_MOV64_IMM(BPF_REG_3, 0),
5658 BPF_MOV64_IMM(BPF_REG_4, 0),
5659 BPF_MOV64_IMM(BPF_REG_5, 0),
5660 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5661 BPF_EXIT_INSN(),
5662 },
5663 .errstr = "R1 type=inv expected=fp",
5664 .result = REJECT,
5665 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5666 },
5667 {
5668 "helper access to variable memory: size = 0 not allowed on != NULL",
5669 .insns = {
5670 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5671 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
5672 BPF_MOV64_IMM(BPF_REG_2, 0),
5673 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
5674 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
5675 BPF_MOV64_IMM(BPF_REG_3, 0),
5676 BPF_MOV64_IMM(BPF_REG_4, 0),
5677 BPF_MOV64_IMM(BPF_REG_5, 0),
5678 BPF_EMIT_CALL(BPF_FUNC_csum_diff),
5679 BPF_EXIT_INSN(),
5680 },
5681 .errstr = "invalid stack type R1 off=-8 access_size=0",
5682 .result = REJECT,
5683 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
5684 },
5685 {
5686 "helper access to variable memory: 8 bytes leak",
5687 .insns = {
5688 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5689 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5690 BPF_MOV64_IMM(BPF_REG_0, 0),
5691 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5692 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5693 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5694 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5695 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5696 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5697 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5698 BPF_MOV64_IMM(BPF_REG_2, 0),
5699 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5700 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5701 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
5702 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
5703 BPF_MOV64_IMM(BPF_REG_3, 0),
5704 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5705 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5706 BPF_EXIT_INSN(),
5707 },
5708 .errstr = "invalid indirect read from stack off -64+32 size 64",
5709 .result = REJECT,
5710 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5711 },
5712 {
5713 "helper access to variable memory: 8 bytes no leak (init memory)",
5714 .insns = {
5715 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
5716 BPF_MOV64_IMM(BPF_REG_0, 0),
5717 BPF_MOV64_IMM(BPF_REG_0, 0),
5718 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
5719 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
5720 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
5721 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
5722 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
5723 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
5724 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
5725 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
5726 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
5727 BPF_MOV64_IMM(BPF_REG_2, 0),
5728 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
5729 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
5730 BPF_MOV64_IMM(BPF_REG_3, 0),
5731 BPF_EMIT_CALL(BPF_FUNC_probe_read),
5732 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
5733 BPF_EXIT_INSN(),
5734 },
5735 .result = ACCEPT,
5736 .prog_type = BPF_PROG_TYPE_TRACEPOINT,
5737 },
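/* Index computations that look bounded but are not provably so: ANDing
 * with the negative constant -4 leaves the upper bits set, and the
 * MOD/AND/RSH/MUL sequence in "invalid range check" multiplies a value
 * the verifier cannot pin to zero by 0x10000000.  Either way R0's max
 * value may fall outside the array, so both programs are rejected.
 */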
5738 {
5739 "invalid and of negative number",
5740 .insns = {
5741 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5742 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5743 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5744 BPF_LD_MAP_FD(BPF_REG_1, 0),
5745 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5746 BPF_FUNC_map_lookup_elem),
5747 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
5748 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
5749 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
5750 BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
5751 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
5752 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
5753 offsetof(struct test_val, foo)),
5754 BPF_EXIT_INSN(),
5755 },
5756 .fixup_map2 = { 3 },
5757 .errstr = "R0 max value is outside of the array range",
5758 .result = REJECT,
5759 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5760 },
5761 {
5762 "invalid range check",
5763 .insns = {
5764 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
5765 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5766 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
5767 BPF_LD_MAP_FD(BPF_REG_1, 0),
5768 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5769 BPF_FUNC_map_lookup_elem),
5770 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
5771 BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
5772 BPF_MOV64_IMM(BPF_REG_9, 1),
5773 BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
5774 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
5775 BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
5776 BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
5777 BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
5778 BPF_MOV32_IMM(BPF_REG_3, 1),
5779 BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
5780 BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
5781 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
5782 BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
5783 BPF_MOV64_REG(BPF_REG_0, 0),
5784 BPF_EXIT_INSN(),
5785 },
5786 .fixup_map2 = { 3 },
5787 .errstr = "R0 max value is outside of the array range",
5788 .result = REJECT,
5789 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
5790 },
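/* Map-in-map tests: the first lookup returns a pointer to the inner map,
 * which must be NULL-checked and passed to the second lookup unmodified.
 * Adding an offset or skipping the NULL check leaves R1 with the wrong
 * type for a map pointer argument.
 */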
5791 {
5792 "map in map access",
5793 .insns = {
5794 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5795 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5796 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5797 BPF_LD_MAP_FD(BPF_REG_1, 0),
5798 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5799 BPF_FUNC_map_lookup_elem),
5800 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
5801 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5802 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5803 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5804 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5805 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5806 BPF_FUNC_map_lookup_elem),
5807 BPF_MOV64_REG(BPF_REG_0, 0),
5808 BPF_EXIT_INSN(),
5809 },
5810 .fixup_map_in_map = { 3 },
5811 .result = ACCEPT,
5812 },
5813 {
5814 "invalid inner map pointer",
5815 .insns = {
5816 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5817 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5818 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5819 BPF_LD_MAP_FD(BPF_REG_1, 0),
5820 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5821 BPF_FUNC_map_lookup_elem),
5822 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
5823 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5824 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5825 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5826 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5828 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5829 BPF_FUNC_map_lookup_elem),
5830 BPF_MOV64_REG(BPF_REG_0, 0),
5831 BPF_EXIT_INSN(),
5832 },
5833 .fixup_map_in_map = { 3 },
5834 .errstr = "R1 type=inv expected=map_ptr",
5835 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
5836 .result = REJECT,
5837 },
5838 {
5839 "forgot null checking on the inner map pointer",
5840 .insns = {
5841 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5842 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5844 BPF_LD_MAP_FD(BPF_REG_1, 0),
5845 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5846 BPF_FUNC_map_lookup_elem),
5847 BPF_ST_MEM(0, BPF_REG_10, -4, 0),
5848 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
5849 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
5850 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5851 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5852 BPF_FUNC_map_lookup_elem),
5853 BPF_MOV64_REG(BPF_REG_0, 0),
5854 BPF_EXIT_INSN(),
5855 },
5856 .fixup_map_in_map = { 3 },
5857 .errstr = "R1 type=map_value_or_null expected=map_ptr",
5858 .result = REJECT,
5859 },
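/* BPF_LD_ABS/BPF_LD_IND behave like calls: they clobber the caller-saved
 * registers R1-R5, so reading any of them afterwards is "!read_ok".
 * R7 is callee-saved and survives, hence the ACCEPT cases.
 */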
5860 {
5861 "ld_abs: check calling conv, r1",
5862 .insns = {
5863 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5864 BPF_MOV64_IMM(BPF_REG_1, 0),
5865 BPF_LD_ABS(BPF_W, -0x200000),
5866 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5867 BPF_EXIT_INSN(),
5868 },
5869 .errstr = "R1 !read_ok",
5870 .result = REJECT,
5871 },
5872 {
5873 "ld_abs: check calling conv, r2",
5874 .insns = {
5875 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5876 BPF_MOV64_IMM(BPF_REG_2, 0),
5877 BPF_LD_ABS(BPF_W, -0x200000),
5878 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5879 BPF_EXIT_INSN(),
5880 },
5881 .errstr = "R2 !read_ok",
5882 .result = REJECT,
5883 },
5884 {
5885 "ld_abs: check calling conv, r3",
5886 .insns = {
5887 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5888 BPF_MOV64_IMM(BPF_REG_3, 0),
5889 BPF_LD_ABS(BPF_W, -0x200000),
5890 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5891 BPF_EXIT_INSN(),
5892 },
5893 .errstr = "R3 !read_ok",
5894 .result = REJECT,
5895 },
5896 {
5897 "ld_abs: check calling conv, r4",
5898 .insns = {
5899 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5900 BPF_MOV64_IMM(BPF_REG_4, 0),
5901 BPF_LD_ABS(BPF_W, -0x200000),
5902 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5903 BPF_EXIT_INSN(),
5904 },
5905 .errstr = "R4 !read_ok",
5906 .result = REJECT,
5907 },
5908 {
5909 "ld_abs: check calling conv, r5",
5910 .insns = {
5911 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5912 BPF_MOV64_IMM(BPF_REG_5, 0),
5913 BPF_LD_ABS(BPF_W, -0x200000),
5914 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5915 BPF_EXIT_INSN(),
5916 },
5917 .errstr = "R5 !read_ok",
5918 .result = REJECT,
5919 },
5920 {
5921 "ld_abs: check calling conv, r7",
5922 .insns = {
5923 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5924 BPF_MOV64_IMM(BPF_REG_7, 0),
5925 BPF_LD_ABS(BPF_W, -0x200000),
5926 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5927 BPF_EXIT_INSN(),
5928 },
5929 .result = ACCEPT,
5930 },
5931 {
5932 "ld_ind: check calling conv, r1",
5933 .insns = {
5934 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5935 BPF_MOV64_IMM(BPF_REG_1, 1),
5936 BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000),
5937 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
5938 BPF_EXIT_INSN(),
5939 },
5940 .errstr = "R1 !read_ok",
5941 .result = REJECT,
5942 },
5943 {
5944 "ld_ind: check calling conv, r2",
5945 .insns = {
5946 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5947 BPF_MOV64_IMM(BPF_REG_2, 1),
5948 BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000),
5949 BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
5950 BPF_EXIT_INSN(),
5951 },
5952 .errstr = "R2 !read_ok",
5953 .result = REJECT,
5954 },
5955 {
5956 "ld_ind: check calling conv, r3",
5957 .insns = {
5958 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5959 BPF_MOV64_IMM(BPF_REG_3, 1),
5960 BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000),
5961 BPF_MOV64_REG(BPF_REG_0, BPF_REG_3),
5962 BPF_EXIT_INSN(),
5963 },
5964 .errstr = "R3 !read_ok",
5965 .result = REJECT,
5966 },
5967 {
5968 "ld_ind: check calling conv, r4",
5969 .insns = {
5970 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5971 BPF_MOV64_IMM(BPF_REG_4, 1),
5972 BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000),
5973 BPF_MOV64_REG(BPF_REG_0, BPF_REG_4),
5974 BPF_EXIT_INSN(),
5975 },
5976 .errstr = "R4 !read_ok",
5977 .result = REJECT,
5978 },
5979 {
5980 "ld_ind: check calling conv, r5",
5981 .insns = {
5982 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5983 BPF_MOV64_IMM(BPF_REG_5, 1),
5984 BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000),
5985 BPF_MOV64_REG(BPF_REG_0, BPF_REG_5),
5986 BPF_EXIT_INSN(),
5987 },
5988 .errstr = "R5 !read_ok",
5989 .result = REJECT,
5990 },
5991 {
5992 "ld_ind: check calling conv, r7",
5993 .insns = {
5994 BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
5995 BPF_MOV64_IMM(BPF_REG_7, 1),
5996 BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
5997 BPF_MOV64_REG(BPF_REG_0, BPF_REG_7),
5998 BPF_EXIT_INSN(),
5999 },
6000 .result = ACCEPT,
6001 },
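/* bpf_perf_event_data::sample_period is a 64-bit context field that may
 * be read with byte, half, word or dword loads; the #if picks the offset
 * of the equivalent bytes on big-endian machines.
 */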
6002 {
6003 "check bpf_perf_event_data->sample_period byte load permitted",
6004 .insns = {
6005 BPF_MOV64_IMM(BPF_REG_0, 0),
6006 #if __BYTE_ORDER == __LITTLE_ENDIAN
6007 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6008 offsetof(struct bpf_perf_event_data, sample_period)),
6009 #else
6010 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
6011 offsetof(struct bpf_perf_event_data, sample_period) + 7),
6012 #endif
6013 BPF_EXIT_INSN(),
6014 },
6015 .result = ACCEPT,
6016 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6017 },
6018 {
6019 "check bpf_perf_event_data->sample_period half load permitted",
6020 .insns = {
6021 BPF_MOV64_IMM(BPF_REG_0, 0),
6022 #if __BYTE_ORDER == __LITTLE_ENDIAN
6023 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6024 offsetof(struct bpf_perf_event_data, sample_period)),
6025 #else
6026 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6027 offsetof(struct bpf_perf_event_data, sample_period) + 6),
6028 #endif
6029 BPF_EXIT_INSN(),
6030 },
6031 .result = ACCEPT,
6032 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6033 },
6034 {
6035 "check bpf_perf_event_data->sample_period word load permitted",
6036 .insns = {
6037 BPF_MOV64_IMM(BPF_REG_0, 0),
6038 #if __BYTE_ORDER == __LITTLE_ENDIAN
6039 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6040 offsetof(struct bpf_perf_event_data, sample_period)),
6041 #else
6042 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
6043 offsetof(struct bpf_perf_event_data, sample_period) + 4),
6044 #endif
6045 BPF_EXIT_INSN(),
6046 },
6047 .result = ACCEPT,
6048 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6049 },
6050 {
6051 "check bpf_perf_event_data->sample_period dword load permitted",
6052 .insns = {
6053 BPF_MOV64_IMM(BPF_REG_0, 0),
6054 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
6055 offsetof(struct bpf_perf_event_data, sample_period)),
6056 BPF_EXIT_INSN(),
6057 },
6058 .result = ACCEPT,
6059 .prog_type = BPF_PROG_TYPE_PERF_EVENT,
6060 },
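/* By contrast, half-word loads from the __sk_buff data and tc_classid
 * fields are not permitted for these program types and must fail as
 * invalid context accesses.
 */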
6061 {
6062 "check skb->data half load not permitted",
6063 .insns = {
6064 BPF_MOV64_IMM(BPF_REG_0, 0),
6065 #if __BYTE_ORDER == __LITTLE_ENDIAN
6066 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6067 offsetof(struct __sk_buff, data)),
6068 #else
6069 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6070 offsetof(struct __sk_buff, data) + 2),
6071 #endif
6072 BPF_EXIT_INSN(),
6073 },
6074 .result = REJECT,
6075 .errstr = "invalid bpf_context access",
6076 },
6077 {
6078 "check skb->tc_classid half load not permitted for lwt prog",
6079 .insns = {
6080 BPF_MOV64_IMM(BPF_REG_0, 0),
6081 #if __BYTE_ORDER == __LITTLE_ENDIAN
6082 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6083 offsetof(struct __sk_buff, tc_classid)),
6084 #else
6085 BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
6086 offsetof(struct __sk_buff, tc_classid) + 2),
6087 #endif
6088 BPF_EXIT_INSN(),
6089 },
6090 .result = REJECT,
6091 .errstr = "invalid bpf_context access",
6092 .prog_type = BPF_PROG_TYPE_LWT_IN,
6093 },
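/* "bounds checks mixing signed and unsigned": each variant combines an
 * unsigned comparison with a signed one.  The unsigned check does not
 * give the value a signed lower bound, so it may still be negative when
 * it reaches the map value pointer; only variants 4, 7 and 9, which pin
 * both bounds, are accepted.
 */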
6094 {
6095 "bounds checks mixing signed and unsigned, positive bounds",
6096 .insns = {
6097 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6098 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6099 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6100 BPF_LD_MAP_FD(BPF_REG_1, 0),
6101 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6102 BPF_FUNC_map_lookup_elem),
6103 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6104 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6105 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6106 BPF_MOV64_IMM(BPF_REG_2, 2),
6107 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
6108 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
6109 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6110 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6111 BPF_MOV64_IMM(BPF_REG_0, 0),
6112 BPF_EXIT_INSN(),
6113 },
6114 .fixup_map1 = { 3 },
6115 .errstr = "unbounded min value",
6116 .result = REJECT,
6117 },
6118 {
6119 "bounds checks mixing signed and unsigned",
6120 .insns = {
6121 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6122 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6123 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6124 BPF_LD_MAP_FD(BPF_REG_1, 0),
6125 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6126 BPF_FUNC_map_lookup_elem),
6127 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6129 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6130 BPF_MOV64_IMM(BPF_REG_2, -1),
6131 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6132 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6133 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6134 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6135 BPF_MOV64_IMM(BPF_REG_0, 0),
6136 BPF_EXIT_INSN(),
6137 },
6138 .fixup_map1 = { 3 },
6139 .errstr = "unbounded min value",
6140 .result = REJECT,
6141 },
6142 {
6143 "bounds checks mixing signed and unsigned, variant 2",
6144 .insns = {
6145 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6146 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6147 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6148 BPF_LD_MAP_FD(BPF_REG_1, 0),
6149 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6150 BPF_FUNC_map_lookup_elem),
6151 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6152 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6153 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6154 BPF_MOV64_IMM(BPF_REG_2, -1),
6155 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6156 BPF_MOV64_IMM(BPF_REG_8, 0),
6157 BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
6158 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6159 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6160 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6161 BPF_MOV64_IMM(BPF_REG_0, 0),
6162 BPF_EXIT_INSN(),
6163 },
6164 .fixup_map1 = { 3 },
6165 .errstr = "unbounded min value",
6166 .result = REJECT,
6167 },
6168 {
6169 "bounds checks mixing signed and unsigned, variant 3",
6170 .insns = {
6171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6174 BPF_LD_MAP_FD(BPF_REG_1, 0),
6175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6176 BPF_FUNC_map_lookup_elem),
6177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6178 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6179 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6180 BPF_MOV64_IMM(BPF_REG_2, -1),
6181 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
6182 BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
6183 BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
6184 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
6185 BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
6186 BPF_MOV64_IMM(BPF_REG_0, 0),
6187 BPF_EXIT_INSN(),
6188 },
6189 .fixup_map1 = { 3 },
6190 .errstr = "unbounded min value",
6191 .result = REJECT,
6192 },
6193 {
6194 "bounds checks mixing signed and unsigned, variant 4",
6195 .insns = {
6196 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6197 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6198 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6199 BPF_LD_MAP_FD(BPF_REG_1, 0),
6200 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6201 BPF_FUNC_map_lookup_elem),
6202 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6203 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6204 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6205 BPF_MOV64_IMM(BPF_REG_2, 1),
6206 BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
6207 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6208 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6209 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6210 BPF_MOV64_IMM(BPF_REG_0, 0),
6211 BPF_EXIT_INSN(),
6212 },
6213 .fixup_map1 = { 3 },
6214 .result = ACCEPT,
6215 },
6216 {
6217 "bounds checks mixing signed and unsigned, variant 5",
6218 .insns = {
6219 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6220 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6221 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6222 BPF_LD_MAP_FD(BPF_REG_1, 0),
6223 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6224 BPF_FUNC_map_lookup_elem),
6225 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6226 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6227 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6228 BPF_MOV64_IMM(BPF_REG_2, -1),
6229 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
6230 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
6231 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
6232 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
6233 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6234 BPF_MOV64_IMM(BPF_REG_0, 0),
6235 BPF_EXIT_INSN(),
6236 },
6237 .fixup_map1 = { 3 },
6238 .errstr = "unbounded min value",
6239 .result = REJECT,
6240 },
6241 {
6242 "bounds checks mixing signed and unsigned, variant 6",
6243 .insns = {
6244 BPF_MOV64_IMM(BPF_REG_2, 0),
6245 BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
6246 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
6247 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6248 BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
6249 BPF_MOV64_IMM(BPF_REG_6, -1),
6250 BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
6251 BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
6252 BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
6253 BPF_MOV64_IMM(BPF_REG_5, 0),
6254 BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
6255 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6256 BPF_FUNC_skb_load_bytes),
6257 BPF_MOV64_IMM(BPF_REG_0, 0),
6258 BPF_EXIT_INSN(),
6259 },
6260 .errstr = "R4 min value is negative, either use unsigned",
6261 .result = REJECT,
6262 },
6263 {
6264 "bounds checks mixing signed and unsigned, variant 7",
6265 .insns = {
6266 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6267 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6269 BPF_LD_MAP_FD(BPF_REG_1, 0),
6270 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6271 BPF_FUNC_map_lookup_elem),
6272 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
6273 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6274 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6275 BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
6276 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
6277 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6278 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6279 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6280 BPF_MOV64_IMM(BPF_REG_0, 0),
6281 BPF_EXIT_INSN(),
6282 },
6283 .fixup_map1 = { 3 },
6284 .result = ACCEPT,
6285 },
6286 {
6287 "bounds checks mixing signed and unsigned, variant 8",
6288 .insns = {
6289 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6290 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6292 BPF_LD_MAP_FD(BPF_REG_1, 0),
6293 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6294 BPF_FUNC_map_lookup_elem),
6295 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6296 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6297 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6298 BPF_MOV64_IMM(BPF_REG_2, -1),
6299 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6300 BPF_MOV64_IMM(BPF_REG_0, 0),
6301 BPF_EXIT_INSN(),
6302 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6303 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6304 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6305 BPF_MOV64_IMM(BPF_REG_0, 0),
6306 BPF_EXIT_INSN(),
6307 },
6308 .fixup_map1 = { 3 },
6309 .errstr = "unbounded min value",
6310 .result = REJECT,
6311 },
6312 {
6313 "bounds checks mixing signed and unsigned, variant 9",
6314 .insns = {
6315 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6316 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6317 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6318 BPF_LD_MAP_FD(BPF_REG_1, 0),
6319 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6320 BPF_FUNC_map_lookup_elem),
6321 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
6322 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6323 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6324 BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
6325 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6326 BPF_MOV64_IMM(BPF_REG_0, 0),
6327 BPF_EXIT_INSN(),
6328 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6329 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6330 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6331 BPF_MOV64_IMM(BPF_REG_0, 0),
6332 BPF_EXIT_INSN(),
6333 },
6334 .fixup_map1 = { 3 },
6335 .result = ACCEPT,
6336 },
6337 {
6338 "bounds checks mixing signed and unsigned, variant 10",
6339 .insns = {
6340 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6341 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6342 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6343 BPF_LD_MAP_FD(BPF_REG_1, 0),
6344 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6345 BPF_FUNC_map_lookup_elem),
6346 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6347 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6348 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6349 BPF_MOV64_IMM(BPF_REG_2, 0),
6350 BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
6351 BPF_MOV64_IMM(BPF_REG_0, 0),
6352 BPF_EXIT_INSN(),
6353 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6354 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6355 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6356 BPF_MOV64_IMM(BPF_REG_0, 0),
6357 BPF_EXIT_INSN(),
6358 },
6359 .fixup_map1 = { 3 },
6360 .errstr = "unbounded min value",
6361 .result = REJECT,
6362 },
6363 {
6364 "bounds checks mixing signed and unsigned, variant 11",
6365 .insns = {
6366 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6367 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6368 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6369 BPF_LD_MAP_FD(BPF_REG_1, 0),
6370 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6371 BPF_FUNC_map_lookup_elem),
6372 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6373 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6374 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6375 BPF_MOV64_IMM(BPF_REG_2, -1),
6376 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6377 /* Dead branch. */
6378 BPF_MOV64_IMM(BPF_REG_0, 0),
6379 BPF_EXIT_INSN(),
6380 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6381 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6382 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6383 BPF_MOV64_IMM(BPF_REG_0, 0),
6384 BPF_EXIT_INSN(),
6385 },
6386 .fixup_map1 = { 3 },
6387 .errstr = "unbounded min value",
6388 .result = REJECT,
6389 },
6390 {
6391 "bounds checks mixing signed and unsigned, variant 12",
6392 .insns = {
6393 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6394 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6395 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6396 BPF_LD_MAP_FD(BPF_REG_1, 0),
6397 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6398 BPF_FUNC_map_lookup_elem),
6399 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6400 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6401 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6402 BPF_MOV64_IMM(BPF_REG_2, -6),
6403 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6404 BPF_MOV64_IMM(BPF_REG_0, 0),
6405 BPF_EXIT_INSN(),
6406 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6407 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6408 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6409 BPF_MOV64_IMM(BPF_REG_0, 0),
6410 BPF_EXIT_INSN(),
6411 },
6412 .fixup_map1 = { 3 },
6413 .errstr = "unbounded min value",
6414 .result = REJECT,
6415 },
6416 {
6417 "bounds checks mixing signed and unsigned, variant 13",
6418 .insns = {
6419 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6420 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6421 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6422 BPF_LD_MAP_FD(BPF_REG_1, 0),
6423 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6424 BPF_FUNC_map_lookup_elem),
6425 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6426 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6427 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6428 BPF_MOV64_IMM(BPF_REG_2, 2),
6429 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6430 BPF_MOV64_IMM(BPF_REG_7, 1),
6431 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
6432 BPF_MOV64_IMM(BPF_REG_0, 0),
6433 BPF_EXIT_INSN(),
6434 BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
6435 BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
6436 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
6437 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6438 BPF_MOV64_IMM(BPF_REG_0, 0),
6439 BPF_EXIT_INSN(),
6440 },
6441 .fixup_map1 = { 3 },
6442 .errstr = "unbounded min value",
6443 .result = REJECT,
6444 },
6445 {
6446 "bounds checks mixing signed and unsigned, variant 14",
6447 .insns = {
6448 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
6449 offsetof(struct __sk_buff, mark)),
6450 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6451 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6452 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6453 BPF_LD_MAP_FD(BPF_REG_1, 0),
6454 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6455 BPF_FUNC_map_lookup_elem),
6456 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6457 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6458 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6459 BPF_MOV64_IMM(BPF_REG_2, -1),
6460 BPF_MOV64_IMM(BPF_REG_8, 2),
6461 BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
6462 BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
6463 BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
6464 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6465 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6466 BPF_MOV64_IMM(BPF_REG_0, 0),
6467 BPF_EXIT_INSN(),
6468 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
6469 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6470 },
6471 .fixup_map1 = { 4 },
6472 .errstr = "unbounded min value",
6473 .result = REJECT,
6474 },
6475 {
6476 "bounds checks mixing signed and unsigned, variant 15",
6477 .insns = {
6478 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6479 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6480 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6481 BPF_LD_MAP_FD(BPF_REG_1, 0),
6482 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6483 BPF_FUNC_map_lookup_elem),
6484 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6485 BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
6486 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
6487 BPF_MOV64_IMM(BPF_REG_2, -6),
6488 BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
6489 BPF_MOV64_IMM(BPF_REG_0, 0),
6490 BPF_EXIT_INSN(),
6491 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6492 BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
6493 BPF_MOV64_IMM(BPF_REG_0, 0),
6494 BPF_EXIT_INSN(),
6495 BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
6496 BPF_MOV64_IMM(BPF_REG_0, 0),
6497 BPF_EXIT_INSN(),
6498 },
6499 .fixup_map1 = { 3 },
6500 .errstr = "unbounded min value",
6501 .result = REJECT,
6502 .result_unpriv = REJECT,
6503 },
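/* Subtraction bounds: each loaded byte is limited to [0, 0xff], but
 * their difference may be negative.  The right shift in variant 1 still
 * leaves a max of 0xff and variant 2 keeps a negative min, so neither
 * may be used as an offset into the map value.
 */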
6504 {
6505 "subtraction bounds (map value) variant 1",
6506 .insns = {
6507 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6508 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6509 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6510 BPF_LD_MAP_FD(BPF_REG_1, 0),
6511 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6512 BPF_FUNC_map_lookup_elem),
6513 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6514 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6515 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7),
6516 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6517 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5),
6518 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6519 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56),
6520 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6521 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6522 BPF_EXIT_INSN(),
6523 BPF_MOV64_IMM(BPF_REG_0, 0),
6524 BPF_EXIT_INSN(),
6525 },
6526 .fixup_map1 = { 3 },
6527 .errstr = "R0 max value is outside of the array range",
6528 .result = REJECT,
6529 },
6530 {
6531 "subtraction bounds (map value) variant 2",
6532 .insns = {
6533 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6534 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6535 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6536 BPF_LD_MAP_FD(BPF_REG_1, 0),
6537 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6538 BPF_FUNC_map_lookup_elem),
6539 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6540 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6541 BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6),
6542 BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1),
6543 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4),
6544 BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3),
6545 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6546 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6547 BPF_EXIT_INSN(),
6548 BPF_MOV64_IMM(BPF_REG_0, 0),
6549 BPF_EXIT_INSN(),
6550 },
6551 .fixup_map1 = { 3 },
6552 .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
6553 .result = REJECT,
6554 },
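/* MOV truncation: BPF_MOV32_IMM zero-extends, so 0xffffffff shifted
 * right by 32 is provably zero and the access is accepted, whereas
 * BPF_MOV64_IMM sign-extends its 32-bit immediate, leaving a huge value
 * after the shift and an out-of-bounds pointer.
 */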
6555 {
6556 "bounds check based on zero-extended MOV",
6557 .insns = {
6558 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6559 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6560 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6561 BPF_LD_MAP_FD(BPF_REG_1, 0),
6562 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6563 BPF_FUNC_map_lookup_elem),
6564 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6565 /* r2 = 0x0000'0000'ffff'ffff */
6566 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6567 /* r2 = 0 */
6568 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6569 /* no-op */
6570 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6571 /* access at offset 0 */
6572 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6573 /* exit */
6574 BPF_MOV64_IMM(BPF_REG_0, 0),
6575 BPF_EXIT_INSN(),
6576 },
6577 .fixup_map1 = { 3 },
6578 .result = ACCEPT
6579 },
6580 {
6581 "bounds check based on sign-extended MOV. test1",
6582 .insns = {
6583 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6584 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6585 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6586 BPF_LD_MAP_FD(BPF_REG_1, 0),
6587 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6588 BPF_FUNC_map_lookup_elem),
6589 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6590 /* r2 = 0xffff'ffff'ffff'ffff */
6591 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6592 /* r2 = 0xffff'ffff */
6593 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6594 /* r0 = <oob pointer> */
6595 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6596 /* access to OOB pointer */
6597 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6598 /* exit */
6599 BPF_MOV64_IMM(BPF_REG_0, 0),
6600 BPF_EXIT_INSN(),
6601 },
6602 .fixup_map1 = { 3 },
6603 .errstr = "map_value pointer and 4294967295",
6604 .result = REJECT
6605 },
6606 {
6607 "bounds check based on sign-extended MOV. test2",
6608 .insns = {
6609 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6610 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6611 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6612 BPF_LD_MAP_FD(BPF_REG_1, 0),
6613 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6614 BPF_FUNC_map_lookup_elem),
6615 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6616 /* r2 = 0xffff'ffff'ffff'ffff */
6617 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6618 /* r2 = 0xfff'ffff */
6619 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6620 /* r0 = <oob pointer> */
6621 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6622 /* access to OOB pointer */
6623 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6624 /* exit */
6625 BPF_MOV64_IMM(BPF_REG_0, 0),
6626 BPF_EXIT_INSN(),
6627 },
6628 .fixup_map1 = { 3 },
6629 .errstr = "R0 min value is outside of the array range",
6630 .result = REJECT
6631 },
6632 {
6633 "bounds check based on reg_off + var_off + insn_off. test1",
6634 .insns = {
6635 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6636 offsetof(struct __sk_buff, mark)),
6637 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6638 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6639 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6640 BPF_LD_MAP_FD(BPF_REG_1, 0),
6641 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6642 BPF_FUNC_map_lookup_elem),
6643 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6644 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6645 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6646 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6647 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6648 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6649 BPF_MOV64_IMM(BPF_REG_0, 0),
6650 BPF_EXIT_INSN(),
6651 },
6652 .fixup_map1 = { 4 },
6653 .errstr = "value_size=8 off=1073741825",
6654 .result = REJECT,
6655 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6656 },
6657 {
6658 "bounds check based on reg_off + var_off + insn_off. test2",
6659 .insns = {
6660 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6661 offsetof(struct __sk_buff, mark)),
6662 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6663 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6664 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6665 BPF_LD_MAP_FD(BPF_REG_1, 0),
6666 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6667 BPF_FUNC_map_lookup_elem),
6668 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6669 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6670 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6671 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6672 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6673 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6674 BPF_MOV64_IMM(BPF_REG_0, 0),
6675 BPF_EXIT_INSN(),
6676 },
6677 .fixup_map1 = { 4 },
6678 .errstr = "value 1073741823",
6679 .result = REJECT,
6680 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6681 },
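/* 32-bit truncation: ALU32/MOV32 operations truncate the tracked 64-bit
 * range.  When the range does not cross a 32-bit boundary the verifier
 * keeps exact bounds and the access is accepted; when it does, the
 * truncated value is treated as (nearly) unbounded and rejected.
 */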
6682 {
6683 "bounds check after truncation of non-boundary-crossing range",
6684 .insns = {
6685 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6686 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6687 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6688 BPF_LD_MAP_FD(BPF_REG_1, 0),
6689 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6690 BPF_FUNC_map_lookup_elem),
6691 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6692 /* r1 = [0x00, 0xff] */
6693 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6694 BPF_MOV64_IMM(BPF_REG_2, 1),
6695 /* r2 = 0x10'0000'0000 */
6696 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6697 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6698 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6699 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6700 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6701 /* r1 = [0x00, 0xff] */
6702 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6703 /* r1 = 0 */
6704 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6705 /* no-op */
6706 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6707 /* access at offset 0 */
6708 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6709 /* exit */
6710 BPF_MOV64_IMM(BPF_REG_0, 0),
6711 BPF_EXIT_INSN(),
6712 },
6713 .fixup_map1 = { 3 },
6714 .result = ACCEPT
6715 },
6716 {
6717 "bounds check after truncation of boundary-crossing range (1)",
6718 .insns = {
6719 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6720 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6721 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6722 BPF_LD_MAP_FD(BPF_REG_1, 0),
6723 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6724 BPF_FUNC_map_lookup_elem),
6725 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6726 /* r1 = [0x00, 0xff] */
6727 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6728 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6729 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6730 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6731 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6732 * [0x0000'0000, 0x0000'007f]
6733 */
6734 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6735 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6736 /* r1 = [0x00, 0xff] or
6737 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6738 */
6739 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6740 /* r1 = 0 or
6741 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6742 */
6743 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6744 /* no-op or OOB pointer computation */
6745 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6746 /* potentially OOB access */
6747 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6748 /* exit */
6749 BPF_MOV64_IMM(BPF_REG_0, 0),
6750 BPF_EXIT_INSN(),
6751 },
6752 .fixup_map1 = { 3 },
6753 /* not actually fully unbounded, but the bound is very high */
6754 .errstr = "R0 unbounded memory access",
6755 .result = REJECT
6756 },
6757 {
6758 "bounds check after truncation of boundary-crossing range (2)",
6759 .insns = {
6760 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6761 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6762 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6763 BPF_LD_MAP_FD(BPF_REG_1, 0),
6764 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6765 BPF_FUNC_map_lookup_elem),
6766 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6767 /* r1 = [0x00, 0xff] */
6768 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6769 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6770 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6771 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6772 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6773 * [0x0000'0000, 0x0000'007f]
6774 * difference to previous test: truncation via MOV32
6775 * instead of ALU32.
6776 */
6777 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
6778 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6779 /* r1 = [0x00, 0xff] or
6780 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6781 */
6782 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6783 /* r1 = 0 or
6784 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6785 */
6786 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6787 /* no-op or OOB pointer computation */
6788 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6789 /* potentially OOB access */
6790 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6791 /* exit */
6792 BPF_MOV64_IMM(BPF_REG_0, 0),
6793 BPF_EXIT_INSN(),
6794 },
6795 .fixup_map1 = { 3 },
6796 /* not actually fully unbounded, but the bound is very high */
6797 .errstr = "R0 unbounded memory access",
6798 .result = REJECT
6799 },
6800 {
6801 "bounds check after wrapping 32-bit addition",
6802 .insns = {
6803 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6804 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6805 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6806 BPF_LD_MAP_FD(BPF_REG_1, 0),
6807 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6808 BPF_FUNC_map_lookup_elem),
6809 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
6810 /* r1 = 0x7fff'ffff */
6811 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
6812 /* r1 = 0xffff'fffe */
6813 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6814 /* r1 = 0 */
6815 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
6816 /* no-op */
6817 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6818 /* access at offset 0 */
6819 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6820 /* exit */
6821 BPF_MOV64_IMM(BPF_REG_0, 0),
6822 BPF_EXIT_INSN(),
6823 },
6824 .fixup_map1 = { 3 },
6825 .result = ACCEPT
6826 },
6827 {
6828 "bounds check after shift with oversized count operand",
6829 .insns = {
6830 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6831 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6832 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6833 BPF_LD_MAP_FD(BPF_REG_1, 0),
6834 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6835 BPF_FUNC_map_lookup_elem),
6836 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6837 BPF_MOV64_IMM(BPF_REG_2, 32),
6838 BPF_MOV64_IMM(BPF_REG_1, 1),
6839 /* r1 = (u32)1 << (u32)32 = ? */
6840 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
6841 /* r1 = [0x0000, 0xffff] */
6842 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
6843 /* computes unknown pointer, potentially OOB */
6844 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6845 /* potentially OOB access */
6846 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6847 /* exit */
6848 BPF_MOV64_IMM(BPF_REG_0, 0),
6849 BPF_EXIT_INSN(),
6850 },
6851 .fixup_map1 = { 3 },
6852 .errstr = "R0 max value is outside of the array range",
6853 .result = REJECT
6854 },
6855 {
6856 "bounds check after right shift of maybe-negative number",
6857 .insns = {
6858 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6859 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6860 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6861 BPF_LD_MAP_FD(BPF_REG_1, 0),
6862 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6863 BPF_FUNC_map_lookup_elem),
6864 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
6865 /* r1 = [0x00, 0xff] */
6866 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6867 /* r1 = [-0x01, 0xfe] */
6868 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
6869 /* r1 = 0 or 0xff'ffff'ffff'ffff */
6870 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6871 /* r1 = 0 or 0xffff'ffff'ffff */
6872 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6873 /* computes unknown pointer, potentially OOB */
6874 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6875 /* potentially OOB access */
6876 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6877 /* exit */
6878 BPF_MOV64_IMM(BPF_REG_0, 0),
6879 BPF_EXIT_INSN(),
6880 },
6881 .fixup_map1 = { 3 },
6882 .errstr = "R0 unbounded memory access",
6883 .result = REJECT
6884 },
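/* The point of the test above: subtracting 1 from [0x00, 0xff] may wrap
 * to 0xffff'ffff'ffff'ffff, and a logical right shift of such a value
 * stays enormous (still about 2^48 after shifting by 16), so no useful
 * upper bound can be recovered and the access is rejected as unbounded.
 */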
6885 {
6886 "bounds check map access with off+size signed 32bit overflow. test1",
6887 .insns = {
6888 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6889 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6890 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6891 BPF_LD_MAP_FD(BPF_REG_1, 0),
6892 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6893 BPF_FUNC_map_lookup_elem),
6894 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6895 BPF_EXIT_INSN(),
6896 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
6897 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
6898 BPF_JMP_A(0),
6899 BPF_EXIT_INSN(),
6900 },
6901 .fixup_map1 = { 3 },
6902 .errstr = "map_value pointer and 2147483646",
6903 .result = REJECT
6904 },
6905 {
6906 "bounds check map access with off+size signed 32bit overflow. test2",
6907 .insns = {
6908 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6909 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6910 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6911 BPF_LD_MAP_FD(BPF_REG_1, 0),
6912 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6913 BPF_FUNC_map_lookup_elem),
6914 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6915 BPF_EXIT_INSN(),
6916 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
6917 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
6918 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
6919 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
6920 BPF_JMP_A(0),
6921 BPF_EXIT_INSN(),
6922 },
6923 .fixup_map1 = { 3 },
6924 .errstr = "pointer offset 1073741822",
6925 .result = REJECT
6926 },
6927 {
6928 "bounds check map access with off+size signed 32bit overflow. test3",
6929 .insns = {
6930 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6931 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6932 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6933 BPF_LD_MAP_FD(BPF_REG_1, 0),
6934 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6935 BPF_FUNC_map_lookup_elem),
6936 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6937 BPF_EXIT_INSN(),
6938 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
6939 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
6940 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
6941 BPF_JMP_A(0),
6942 BPF_EXIT_INSN(),
6943 },
6944 .fixup_map1 = { 3 },
6945 .errstr = "pointer offset -1073741822",
6946 .result = REJECT
6947 },
6948 {
6949 "bounds check map access with off+size signed 32bit overflow. test4",
6950 .insns = {
6951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6952 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6953 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6954 BPF_LD_MAP_FD(BPF_REG_1, 0),
6955 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6956 BPF_FUNC_map_lookup_elem),
6957 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
6958 BPF_EXIT_INSN(),
6959 BPF_MOV64_IMM(BPF_REG_1, 1000000),
6960 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
6961 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6962 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
6963 BPF_JMP_A(0),
6964 BPF_EXIT_INSN(),
6965 },
6966 .fixup_map1 = { 3 },
6967 .errstr = "map_value pointer and 1000000000000",
6968 .result = REJECT
6969 },
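/* The four overflow tests above pick constants so that off + size
 * overflows a signed 32-bit value in different ways: 0x7ffffffe ==
 * 2147483646, 2 * 0x1fffffff == 1073741822 (exercised in both the
 * positive and the negative direction), and 1000000 * 1000000 ==
 * 1000000000000; each value shows up verbatim in the expected error
 * string.  All of them are far beyond the 8-byte value size, so the
 * verifier refuses the pointer arithmetic before any access is
 * attempted.
 */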
6970 {
6971 "pointer/scalar confusion in state equality check (way 1)",
6972 .insns = {
6973 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6974 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6975 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6976 BPF_LD_MAP_FD(BPF_REG_1, 0),
6977 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6978 BPF_FUNC_map_lookup_elem),
6979 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
6980 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
6981 BPF_JMP_A(1),
6982 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
6983 BPF_JMP_A(0),
6984 BPF_EXIT_INSN(),
6985 },
6986 .fixup_map1 = { 3 },
6987 .result = ACCEPT,
6988 .result_unpriv = REJECT,
6989 .errstr_unpriv = "R0 leaks addr as return value"
6990 },
6991 {
6992 "pointer/scalar confusion in state equality check (way 2)",
6993 .insns = {
6994 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6995 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6996 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6997 BPF_LD_MAP_FD(BPF_REG_1, 0),
6998 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6999 BPF_FUNC_map_lookup_elem),
7000 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7001 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7002 BPF_JMP_A(1),
7003 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7004 BPF_EXIT_INSN(),
7005 },
7006 .fixup_map1 = { 3 },
7007 .result = ACCEPT,
7008 .result_unpriv = REJECT,
7009 .errstr_unpriv = "R0 leaks addr as return value"
7010 },
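/* In both "pointer/scalar confusion" tests R0 is, depending on the
 * branch taken, either a scalar loaded from the map value or the frame
 * pointer itself, and the two paths meet again before the exit.  The
 * verifier's state-equality pruning must not treat those two R0s as
 * equivalent, and since returning a pointer in R0 would expose a kernel
 * address, the unprivileged load is rejected with "R0 leaks addr as
 * return value" while the privileged one is accepted.
 */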
7011 {
7012 "variable-offset ctx access",
7013 .insns = {
7014 /* Get an unknown value */
7015 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7016 /* Make it small and 4-byte aligned */
7017 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7018 /* add it to skb. We now have either &skb->len or
7019 * &skb->pkt_type, but we don't know which
7020 */
7021 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
7022 /* dereference it */
7023 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
7024 BPF_EXIT_INSN(),
7025 },
7026 .errstr = "variable ctx access var_off=(0x0; 0x4)",
7027 .result = REJECT,
7028 .prog_type = BPF_PROG_TYPE_LWT_IN,
7029 },
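/* The AND with 4 leaves an offset of either 0 or 4, i.e. &skb->len or
 * &skb->pkt_type as noted above.  Context loads are rewritten by the
 * verifier per field at a fixed offset, so a ctx pointer with a variable
 * offset cannot be rewritten and is rejected; the var_off=(0x0; 0x4) in
 * the error is the (known bits; unknown bits) pair describing that
 * 0-or-4 offset.
 */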
7030 {
7031 "variable-offset stack access",
7032 .insns = {
7033 /* Fill the top 8 bytes of the stack */
7034 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7035 /* Get an unknown value */
7036 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7037 /* Make it small and 4-byte aligned */
7038 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7039 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7040 /* add it to fp. We now have either fp-4 or fp-8, but
7041 * we don't know which
7042 */
7043 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7044 /* dereference it */
7045 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
7046 BPF_EXIT_INSN(),
7047 },
7048 .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)",
7049 .result = REJECT,
7050 .prog_type = BPF_PROG_TYPE_LWT_IN,
7051 },
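/* Same construction against the stack: AND 4 gives 0 or 4, SUB 8 then
 * gives -8 or -4, so the pointer is fp-8 or fp-4 as the comment says.
 * The verifier only accepts stack accesses at a known constant offset,
 * so the variable-offset load is rejected; the var_off in the error
 * encodes a base of -8 plus one unknown bit worth 4.
 */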
7052 {
7053 "indirect variable-offset stack access",
7054 .insns = {
7055 /* Fill the top 8 bytes of the stack */
7056 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7057 /* Get an unknown value */
7058 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7059 /* Make it small and 4-byte aligned */
7060 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7061 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7062 /* add it to fp. We now have either fp-4 or fp-8, but
7063 * we don't know which
7064 */
7065 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7066 /* dereference it indirectly */
7067 BPF_LD_MAP_FD(BPF_REG_1, 0),
7068 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7069 BPF_FUNC_map_lookup_elem),
7070 BPF_MOV64_IMM(BPF_REG_0, 0),
7071 BPF_EXIT_INSN(),
7072 },
7073 .fixup_map1 = { 5 },
7074 .errstr = "variable stack read R2",
7075 .result = REJECT,
7076 .prog_type = BPF_PROG_TYPE_LWT_IN,
7077 },
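/* The indirect variant never dereferences the variable-offset pointer
 * itself but passes it as the key argument to bpf_map_lookup_elem.
 * Checking a helper's memory argument also requires a constant stack
 * offset (the verifier has to know which stack slots hold the key),
 * hence "variable stack read R2".
 */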
7078 {
7079 "direct stack access with 32-bit wraparound. test1",
7080 .insns = {
7081 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7082 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7083 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7084 BPF_MOV32_IMM(BPF_REG_0, 0),
7085 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7086 BPF_EXIT_INSN()
7087 },
7088 .errstr = "fp pointer and 2147483647",
7089 .result = REJECT
7090 },
7091 {
7092 "direct stack access with 32-bit wraparound. test2",
7093 .insns = {
7094 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7095 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7096 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7097 BPF_MOV32_IMM(BPF_REG_0, 0),
7098 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7099 BPF_EXIT_INSN()
7100 },
7101 .errstr = "fp pointer and 1073741823",
7102 .result = REJECT
7103 },
7104 {
7105 "direct stack access with 32-bit wraparound. test3",
7106 .insns = {
7107 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7109 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7110 BPF_MOV32_IMM(BPF_REG_0, 0),
7111 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7112 BPF_EXIT_INSN()
7113 },
7114 .errstr = "fp pointer offset 1073741822",
7115 .result = REJECT
7116 },
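/* In test1 and test2 the very first constant (2147483647 resp.
 * 1073741823) already appears in the error string, suggesting that the
 * first ADD on its own is refused; in test3 each 0x1fffffff step is
 * small enough on its own, so the error reports the accumulated offset
 * 2 * 0x1fffffff == 1073741822 instead.
 */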
7117 {
7118 "liveness pruning and write screening",
7119 .insns = {
7120 /* Get an unknown value */
7121 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7122 /* branch conditions teach us nothing about R2 */
7123 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7124 BPF_MOV64_IMM(BPF_REG_0, 0),
7125 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
7126 BPF_MOV64_IMM(BPF_REG_0, 0),
7127 BPF_EXIT_INSN(),
7128 },
7129 .errstr = "R0 !read_ok",
7130 .result = REJECT,
7131 .prog_type = BPF_PROG_TYPE_LWT_IN,
7132 },
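/* Roughly: on the path where both conditional jumps are taken, both
 * writes to R0 are skipped and the exit would read an uninitialized R0,
 * hence "R0 !read_ok".  The test name describes what must not go wrong
 * here: liveness tracking has to screen writes per path, so a state in
 * which R0 was written must not be used to prune the path in which it
 * was not.
 */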
7133 {
7134 "varlen_map_value_access pruning",
7135 .insns = {
7136 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7137 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7138 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7139 BPF_LD_MAP_FD(BPF_REG_1, 0),
7140 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7141 BPF_FUNC_map_lookup_elem),
7142 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
7143 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
7144 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
7145 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
7146 BPF_MOV32_IMM(BPF_REG_1, 0),
7147 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
7148 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7149 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
7150 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
7151 offsetof(struct test_val, foo)),
7152 BPF_EXIT_INSN(),
7153 },
7154 .fixup_map2 = { 3 },
7155 .errstr_unpriv = "R0 leaks addr",
7156 .errstr = "R0 unbounded memory access",
7157 .result_unpriv = REJECT,
7158 .result = REJECT,
7159 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7160 },
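/* Roughly: the JSGT against MAX_ENTRIES only provides a signed upper
 * bound, so a negative (huge unsigned) value can still reach the
 * shift-and-add, which makes the computed offset unbounded.  The safe
 * branch that forces R1 to 0 converges with the unsafe one at the JA,
 * and pruning there must not hide the unsafe path.
 */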
7161 {
7162 "invalid 64-bit BPF_END",
7163 .insns = {
7164 BPF_MOV32_IMM(BPF_REG_0, 0),
7165 {
7166 .code = BPF_ALU64 | BPF_END | BPF_TO_LE,
7167 .dst_reg = BPF_REG_0,
7168 .src_reg = 0,
7169 .off = 0,
7170 .imm = 32,
7171 },
7172 BPF_EXIT_INSN(),
7173 },
7174 .errstr = "BPF_END uses reserved fields",
7175 .result = REJECT,
7176 },
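/* BPF_END (the byte-swap operation) is only defined for the 32-bit
 * BPF_ALU class; encoding it with BPF_ALU64 lands in reserved encoding
 * space, which is what "BPF_END uses reserved fields" complains about.
 */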
7177 {
7178 "arithmetic ops make PTR_TO_CTX unusable",
7179 .insns = {
7180 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
7181 offsetof(struct __sk_buff, data) -
7182 offsetof(struct __sk_buff, mark)),
7183 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7184 offsetof(struct __sk_buff, mark)),
7185 BPF_EXIT_INSN(),
7186 },
7187 .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
7188 .result = REJECT,
7189 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7190 },
7191 {
7192 "XDP pkt read, pkt_end mangling, bad access 1",
7193 .insns = {
7194 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7195 offsetof(struct xdp_md, data)),
7196 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7197 offsetof(struct xdp_md, data_end)),
7198 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7199 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7200 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
7201 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7202 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7203 BPF_MOV64_IMM(BPF_REG_0, 0),
7204 BPF_EXIT_INSN(),
7205 },
7206 .errstr = "R1 offset is outside of the packet",
7207 .result = REJECT,
7208 .prog_type = BPF_PROG_TYPE_XDP,
7209 },
7210 {
7211 "XDP pkt read, pkt_end mangling, bad access 2",
7212 .insns = {
7213 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7214 offsetof(struct xdp_md, data)),
7215 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7216 offsetof(struct xdp_md, data_end)),
7217 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7218 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7219 BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
7220 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7221 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7222 BPF_MOV64_IMM(BPF_REG_0, 0),
7223 BPF_EXIT_INSN(),
7224 },
7225 .errstr = "R1 offset is outside of the packet",
7226 .result = REJECT,
7227 .prog_type = BPF_PROG_TYPE_XDP,
7228 },
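/* Both "pkt_end mangling" tests move R3 (pkt_end) by 8 before the
 * comparison.  Once pkt_end has been modified, the JGT check no longer
 * proves that the 8 bytes below R1 lie inside the packet, so the DW
 * load at offset -8 is rejected as being outside of the packet.
 */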
7229 {
7230 "XDP pkt read, pkt_data' > pkt_end, good access",
7231 .insns = {
7232 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7233 offsetof(struct xdp_md, data)),
7234 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7235 offsetof(struct xdp_md, data_end)),
7236 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7237 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7238 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7239 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7240 BPF_MOV64_IMM(BPF_REG_0, 0),
7241 BPF_EXIT_INSN(),
7242 },
7243 .result = ACCEPT,
7244 .prog_type = BPF_PROG_TYPE_XDP,
7245 },
7246 {
7247 "XDP pkt read, pkt_data' > pkt_end, bad access 1",
7248 .insns = {
7249 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7250 offsetof(struct xdp_md, data)),
7251 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7252 offsetof(struct xdp_md, data_end)),
7253 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7254 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7255 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
7256 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7257 BPF_MOV64_IMM(BPF_REG_0, 0),
7258 BPF_EXIT_INSN(),
7259 },
7260 .errstr = "R1 offset is outside of the packet",
7261 .result = REJECT,
7262 .prog_type = BPF_PROG_TYPE_XDP,
7263 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7264 },
7265 {
7266 "XDP pkt read, pkt_data' > pkt_end, bad access 2",
7267 .insns = {
7268 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7269 offsetof(struct xdp_md, data)),
7270 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7271 offsetof(struct xdp_md, data_end)),
7272 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7273 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7274 BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
7275 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7276 BPF_MOV64_IMM(BPF_REG_0, 0),
7277 BPF_EXIT_INSN(),
7278 },
7279 .errstr = "R1 offset is outside of the packet",
7280 .result = REJECT,
7281 .prog_type = BPF_PROG_TYPE_XDP,
7282 },
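/* This group and the ones that follow all use the same construction:
 * R1 = data + 8, R1 is compared against pkt_end, and the load sits on
 * the branch where the comparison proved something.  As the expected
 * results show, a branch proving the non-strict "data + 8 <= pkt_end"
 * certifies the full 8 bytes (a DW load at offset -8 is the good case),
 * while a branch proving only the strict "data + 8 < pkt_end" is treated
 * one byte more conservatively (a W load at -5 is the good case and the
 * DW load at -8 the bad one).  "bad access 2" in each group performs the
 * load on the branch where nothing was proven.  The remaining groups
 * repeat this for JLT, JGE and JLE with pkt_end on either side of the
 * comparison.
 */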
7283 {
7284 "XDP pkt read, pkt_end > pkt_data', good access",
7285 .insns = {
7286 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7287 offsetof(struct xdp_md, data)),
7288 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7289 offsetof(struct xdp_md, data_end)),
7290 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7291 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7292 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7293 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7294 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7295 BPF_MOV64_IMM(BPF_REG_0, 0),
7296 BPF_EXIT_INSN(),
7297 },
7298 .result = ACCEPT,
7299 .prog_type = BPF_PROG_TYPE_XDP,
7300 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7301 },
7302 {
7303 "XDP pkt read, pkt_end > pkt_data', bad access 1",
7304 .insns = {
7305 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7306 offsetof(struct xdp_md, data)),
7307 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7308 offsetof(struct xdp_md, data_end)),
7309 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7310 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7311 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7312 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7313 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7314 BPF_MOV64_IMM(BPF_REG_0, 0),
7315 BPF_EXIT_INSN(),
7316 },
7317 .errstr = "R1 offset is outside of the packet",
7318 .result = REJECT,
7319 .prog_type = BPF_PROG_TYPE_XDP,
7320 },
7321 {
7322 "XDP pkt read, pkt_end > pkt_data', bad access 2",
7323 .insns = {
7324 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7325 offsetof(struct xdp_md, data)),
7326 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7327 offsetof(struct xdp_md, data_end)),
7328 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7329 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7330 BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
7331 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7332 BPF_MOV64_IMM(BPF_REG_0, 0),
7333 BPF_EXIT_INSN(),
7334 },
7335 .errstr = "R1 offset is outside of the packet",
7336 .result = REJECT,
7337 .prog_type = BPF_PROG_TYPE_XDP,
7338 },
7339 {
7340 "XDP pkt read, pkt_data' < pkt_end, good access",
7341 .insns = {
7342 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7343 offsetof(struct xdp_md, data)),
7344 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7345 offsetof(struct xdp_md, data_end)),
7346 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7347 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7348 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7349 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7350 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7351 BPF_MOV64_IMM(BPF_REG_0, 0),
7352 BPF_EXIT_INSN(),
7353 },
7354 .result = ACCEPT,
7355 .prog_type = BPF_PROG_TYPE_XDP,
7356 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7357 },
7358 {
7359 "XDP pkt read, pkt_data' < pkt_end, bad access 1",
7360 .insns = {
7361 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7362 offsetof(struct xdp_md, data)),
7363 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7364 offsetof(struct xdp_md, data_end)),
7365 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7366 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7367 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7368 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7369 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7370 BPF_MOV64_IMM(BPF_REG_0, 0),
7371 BPF_EXIT_INSN(),
7372 },
7373 .errstr = "R1 offset is outside of the packet",
7374 .result = REJECT,
7375 .prog_type = BPF_PROG_TYPE_XDP,
7376 },
7377 {
7378 "XDP pkt read, pkt_data' < pkt_end, bad access 2",
7379 .insns = {
7380 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7381 offsetof(struct xdp_md, data)),
7382 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7383 offsetof(struct xdp_md, data_end)),
7384 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7385 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7386 BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
7387 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7388 BPF_MOV64_IMM(BPF_REG_0, 0),
7389 BPF_EXIT_INSN(),
7390 },
7391 .errstr = "R1 offset is outside of the packet",
7392 .result = REJECT,
7393 .prog_type = BPF_PROG_TYPE_XDP,
7394 },
7395 {
7396 "XDP pkt read, pkt_end < pkt_data', good access",
7397 .insns = {
7398 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7399 offsetof(struct xdp_md, data)),
7400 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7401 offsetof(struct xdp_md, data_end)),
7402 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7403 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7404 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7405 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7406 BPF_MOV64_IMM(BPF_REG_0, 0),
7407 BPF_EXIT_INSN(),
7408 },
7409 .result = ACCEPT,
7410 .prog_type = BPF_PROG_TYPE_XDP,
7411 },
7412 {
7413 "XDP pkt read, pkt_end < pkt_data', bad access 1",
7414 .insns = {
7415 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7416 offsetof(struct xdp_md, data)),
7417 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7418 offsetof(struct xdp_md, data_end)),
7419 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7420 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7421 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
7422 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7423 BPF_MOV64_IMM(BPF_REG_0, 0),
7424 BPF_EXIT_INSN(),
7425 },
7426 .errstr = "R1 offset is outside of the packet",
7427 .result = REJECT,
7428 .prog_type = BPF_PROG_TYPE_XDP,
7429 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7430 },
7431 {
7432 "XDP pkt read, pkt_end < pkt_data', bad access 2",
7433 .insns = {
7434 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7435 offsetof(struct xdp_md, data)),
7436 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7437 offsetof(struct xdp_md, data_end)),
7438 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7439 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7440 BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
7441 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7442 BPF_MOV64_IMM(BPF_REG_0, 0),
7443 BPF_EXIT_INSN(),
7444 },
7445 .errstr = "R1 offset is outside of the packet",
7446 .result = REJECT,
7447 .prog_type = BPF_PROG_TYPE_XDP,
7448 },
7449 {
7450 "XDP pkt read, pkt_data' >= pkt_end, good access",
7451 .insns = {
7452 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7453 offsetof(struct xdp_md, data)),
7454 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7455 offsetof(struct xdp_md, data_end)),
7456 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7457 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7458 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7459 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7460 BPF_MOV64_IMM(BPF_REG_0, 0),
7461 BPF_EXIT_INSN(),
7462 },
7463 .result = ACCEPT,
7464 .prog_type = BPF_PROG_TYPE_XDP,
7465 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7466 },
7467 {
7468 "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
7469 .insns = {
7470 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7471 offsetof(struct xdp_md, data)),
7472 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7473 offsetof(struct xdp_md, data_end)),
7474 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7475 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7476 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
7477 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7478 BPF_MOV64_IMM(BPF_REG_0, 0),
7479 BPF_EXIT_INSN(),
7480 },
7481 .errstr = "R1 offset is outside of the packet",
7482 .result = REJECT,
7483 .prog_type = BPF_PROG_TYPE_XDP,
7484 },
7485 {
7486 "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
7487 .insns = {
7488 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7489 offsetof(struct xdp_md, data)),
7490 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7491 offsetof(struct xdp_md, data_end)),
7492 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7493 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7494 BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
7495 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7496 BPF_MOV64_IMM(BPF_REG_0, 0),
7497 BPF_EXIT_INSN(),
7498 },
7499 .errstr = "R1 offset is outside of the packet",
7500 .result = REJECT,
7501 .prog_type = BPF_PROG_TYPE_XDP,
7502 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7503 },
7504 {
7505 "XDP pkt read, pkt_end >= pkt_data', good access",
7506 .insns = {
7507 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7508 offsetof(struct xdp_md, data)),
7509 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7510 offsetof(struct xdp_md, data_end)),
7511 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7512 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7513 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7514 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7515 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7516 BPF_MOV64_IMM(BPF_REG_0, 0),
7517 BPF_EXIT_INSN(),
7518 },
7519 .result = ACCEPT,
7520 .prog_type = BPF_PROG_TYPE_XDP,
7521 },
7522 {
7523 "XDP pkt read, pkt_end >= pkt_data', bad access 1",
7524 .insns = {
7525 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7526 offsetof(struct xdp_md, data)),
7527 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7528 offsetof(struct xdp_md, data_end)),
7529 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7530 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7531 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7532 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7533 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7534 BPF_MOV64_IMM(BPF_REG_0, 0),
7535 BPF_EXIT_INSN(),
7536 },
7537 .errstr = "R1 offset is outside of the packet",
7538 .result = REJECT,
7539 .prog_type = BPF_PROG_TYPE_XDP,
7540 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7541 },
7542 {
7543 "XDP pkt read, pkt_end >= pkt_data', bad access 2",
7544 .insns = {
7545 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7546 offsetof(struct xdp_md, data)),
7547 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7548 offsetof(struct xdp_md, data_end)),
7549 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7550 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7551 BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
7552 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7553 BPF_MOV64_IMM(BPF_REG_0, 0),
7554 BPF_EXIT_INSN(),
7555 },
7556 .errstr = "R1 offset is outside of the packet",
7557 .result = REJECT,
7558 .prog_type = BPF_PROG_TYPE_XDP,
7559 },
7560 {
7561 "XDP pkt read, pkt_data' <= pkt_end, good access",
7562 .insns = {
7563 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7564 offsetof(struct xdp_md, data)),
7565 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7566 offsetof(struct xdp_md, data_end)),
7567 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7568 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7569 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7570 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7571 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7572 BPF_MOV64_IMM(BPF_REG_0, 0),
7573 BPF_EXIT_INSN(),
7574 },
7575 .result = ACCEPT,
7576 .prog_type = BPF_PROG_TYPE_XDP,
7577 },
7578 {
7579 "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
7580 .insns = {
7581 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7582 offsetof(struct xdp_md, data)),
7583 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7584 offsetof(struct xdp_md, data_end)),
7585 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7586 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7587 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7588 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
7589 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
7590 BPF_MOV64_IMM(BPF_REG_0, 0),
7591 BPF_EXIT_INSN(),
7592 },
7593 .errstr = "R1 offset is outside of the packet",
7594 .result = REJECT,
7595 .prog_type = BPF_PROG_TYPE_XDP,
7596 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7597 },
7598 {
7599 "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
7600 .insns = {
7601 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7602 offsetof(struct xdp_md, data)),
7603 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7604 offsetof(struct xdp_md, data_end)),
7605 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7606 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7607 BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
7608 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7609 BPF_MOV64_IMM(BPF_REG_0, 0),
7610 BPF_EXIT_INSN(),
7611 },
7612 .errstr = "R1 offset is outside of the packet",
7613 .result = REJECT,
7614 .prog_type = BPF_PROG_TYPE_XDP,
7615 },
7616 {
7617 "XDP pkt read, pkt_end <= pkt_data', good access",
7618 .insns = {
7619 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7620 offsetof(struct xdp_md, data)),
7621 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7622 offsetof(struct xdp_md, data_end)),
7623 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7624 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7625 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7626 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7627 BPF_MOV64_IMM(BPF_REG_0, 0),
7628 BPF_EXIT_INSN(),
7629 },
7630 .result = ACCEPT,
7631 .prog_type = BPF_PROG_TYPE_XDP,
7632 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7633 },
7634 {
7635 "XDP pkt read, pkt_end <= pkt_data', bad access 1",
7636 .insns = {
7637 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7638 offsetof(struct xdp_md, data)),
7639 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7640 offsetof(struct xdp_md, data_end)),
7641 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7642 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7643 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
7644 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
7645 BPF_MOV64_IMM(BPF_REG_0, 0),
7646 BPF_EXIT_INSN(),
7647 },
7648 .errstr = "R1 offset is outside of the packet",
7649 .result = REJECT,
7650 .prog_type = BPF_PROG_TYPE_XDP,
7651 },
7652 {
7653 "XDP pkt read, pkt_end <= pkt_data', bad access 2",
7654 .insns = {
7655 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7656 offsetof(struct xdp_md, data)),
7657 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
7658 offsetof(struct xdp_md, data_end)),
7659 BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
7660 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
7661 BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
7662 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
7663 BPF_MOV64_IMM(BPF_REG_0, 0),
7664 BPF_EXIT_INSN(),
7665 },
7666 .errstr = "R1 offset is outside of the packet",
7667 .result = REJECT,
7668 .prog_type = BPF_PROG_TYPE_XDP,
7669 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
7670 },
7671 };
7672
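/* Scan backwards from the end of the fixed-size insns array for the last
 * instruction whose code or imm is non-zero and return its index + 1.
 * This relies on the insns arrays in struct bpf_test being zero beyond
 * the actual program, which the designated initializers above guarantee.
 */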
7673 static int probe_filter_length(const struct bpf_insn *fp)
7674 {
7675 int len;
7676
7677 for (len = MAX_INSNS - 1; len > 0; --len)
7678 if (fp[len].code != 0 || fp[len].imm != 0)
7679 break;
7680 return len + 1;
7681 }
7682
7683 static int create_map(uint32_t size_value, uint32_t max_elem)
7684 {
7685 int fd;
7686
7687 fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
7688 size_value, max_elem, BPF_F_NO_PREALLOC);
7689 if (fd < 0)
7690 printf("Failed to create hash map '%s'!\n", strerror(errno));
7691
7692 return fd;
7693 }
7694
7695 static int create_prog_array(void)
7696 {
7697 int fd;
7698
7699 fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
7700 sizeof(int), 4, 0);
7701 if (fd < 0)
7702 printf("Failed to create prog array '%s'!\n", strerror(errno));
7703
7704 return fd;
7705 }
7706
7707 static int create_map_in_map(void)
7708 {
7709 int inner_map_fd, outer_map_fd;
7710
7711 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
7712 sizeof(int), 1, 0);
7713 if (inner_map_fd < 0) {
7714 printf("Failed to create array '%s'!\n", strerror(errno));
7715 return inner_map_fd;
7716 }
7717
7718 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
7719 sizeof(int), inner_map_fd, 1, 0);
7720 if (outer_map_fd < 0)
7721 printf("Failed to create array of maps '%s'!\n",
7722 strerror(errno));
7723
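	/* The inner map fd is only needed while the outer map is created;
	 * it tells the kernel which attributes future inner maps must
	 * match, so it can be closed right away.
	 */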
7724 close(inner_map_fd);
7725
7726 return outer_map_fd;
7727 }
7728
7729 static char bpf_vlog[32768];
7730
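/* Patch map fds into the test program.  Each fixup array lists instruction
 * indices of BPF_LD_MAP_FD pseudo instructions (these expand to two insns;
 * the fd goes into the imm of the first one).  For example, in a test whose
 * program starts with
 *	0: BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0)
 *	1: BPF_MOV64_REG(BPF_REG_2, BPF_REG_10)
 *	2: BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8)
 *	3: BPF_LD_MAP_FD(BPF_REG_1, 0)
 * .fixup_map1 = { 3 } makes prog[3].imm the fd returned by create_map().
 * The fixup arrays are zero-terminated, so instruction 0 can never be a
 * fixup target.
 */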
7731 static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
7732 int *map_fds)
7733 {
7734 int *fixup_map1 = test->fixup_map1;
7735 int *fixup_map2 = test->fixup_map2;
7736 int *fixup_prog = test->fixup_prog;
7737 int *fixup_map_in_map = test->fixup_map_in_map;
7738
7739 /* Allocating hash maps with a single element is fine here, since
7740  * we only exercise the verifier and never do a runtime lookup, so
7741  * the only thing that really matters is the value size.
7742  */
7743 if (*fixup_map1) {
7744 map_fds[0] = create_map(sizeof(long long), 1);
7745 do {
7746 prog[*fixup_map1].imm = map_fds[0];
7747 fixup_map1++;
7748 } while (*fixup_map1);
7749 }
7750
7751 if (*fixup_map2) {
7752 map_fds[1] = create_map(sizeof(struct test_val), 1);
7753 do {
7754 prog[*fixup_map2].imm = map_fds[1];
7755 fixup_map2++;
7756 } while (*fixup_map2);
7757 }
7758
7759 if (*fixup_prog) {
7760 map_fds[2] = create_prog_array();
7761 do {
7762 prog[*fixup_prog].imm = map_fds[2];
7763 fixup_prog++;
7764 } while (*fixup_prog);
7765 }
7766
7767 if (*fixup_map_in_map) {
7768 map_fds[3] = create_map_in_map();
7769 do {
7770 prog[*fixup_map_in_map].imm = map_fds[3];
7771 fixup_map_in_map++;
7772 } while (*fixup_map_in_map);
7773 }
7774 }
7775
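/* Load one test program and compare the outcome, and for expected
 * rejections also the verifier log, against the test's expectations.
 * The program is loaded with strict alignment when the test carries
 * F_LOAD_WITH_STRICT_ALIGNMENT, and the verifier log is captured in
 * bpf_vlog so that expected error strings can be matched.  On kernels
 * without efficient unaligned access, a rejection whose log contains
 * "Unknown alignment." is tolerated for tests flagged
 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS and counted as a pass with a note.
 */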
7776 static void do_test_single(struct bpf_test *test, bool unpriv,
7777 int *passes, int *errors)
7778 {
7779 int fd_prog, expected_ret, reject_from_alignment;
7780 struct bpf_insn *prog = test->insns;
7781 int prog_len = probe_filter_length(prog);
7782 int prog_type = test->prog_type;
7783 int map_fds[MAX_NR_MAPS];
7784 const char *expected_err;
7785 int i;
7786
7787 for (i = 0; i < MAX_NR_MAPS; i++)
7788 map_fds[i] = -1;
7789
7790 do_test_fixup(test, prog, map_fds);
7791
7792 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
7793 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
7794 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
7795
7796 expected_ret = unpriv && test->result_unpriv != UNDEF ?
7797 test->result_unpriv : test->result;
7798 expected_err = unpriv && test->errstr_unpriv ?
7799 test->errstr_unpriv : test->errstr;
7800
7801 reject_from_alignment = fd_prog < 0 &&
7802 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
7803 strstr(bpf_vlog, "Unknown alignment.");
7804 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
7805 if (reject_from_alignment) {
7806 printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
7807 strerror(errno));
7808 goto fail_log;
7809 }
7810 #endif
7811 if (expected_ret == ACCEPT) {
7812 if (fd_prog < 0 && !reject_from_alignment) {
7813 printf("FAIL\nFailed to load prog '%s'!\n",
7814 strerror(errno));
7815 goto fail_log;
7816 }
7817 } else {
7818 if (fd_prog >= 0) {
7819 printf("FAIL\nUnexpectedly succeeded in loading the program!\n");
7820 goto fail_log;
7821 }
7822 if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
7823 printf("FAIL\nUnexpected error message!\n");
7824 goto fail_log;
7825 }
7826 }
7827
7828 (*passes)++;
7829 printf("OK%s\n", reject_from_alignment ?
7830 " (NOTE: reject due to unknown alignment)" : "");
7831 close_fds:
7832 close(fd_prog);
7833 for (i = 0; i < MAX_NR_MAPS; i++)
7834 close(map_fds[i]);
7835 sched_yield();
7836 return;
7837 fail_log:
7838 (*errors)++;
7839 printf("%s", bpf_vlog);
7840 goto close_fds;
7841 }
7842
7843 static bool is_admin(void)
7844 {
7845 cap_t caps;
7846 cap_flag_value_t sysadmin = CAP_CLEAR;
7847 const cap_value_t cap_val = CAP_SYS_ADMIN;
7848
7849 #ifdef CAP_IS_SUPPORTED
7850 if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
7851 perror("CAP_IS_SUPPORTED");
7852 return false;
7853 }
7854 #endif
7855 caps = cap_get_proc();
7856 if (!caps) {
7857 perror("cap_get_proc");
7858 return false;
7859 }
7860 if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
7861 perror("cap_get_flag");
7862 if (cap_free(caps))
7863 perror("cap_free");
7864 return (sysadmin == CAP_SET);
7865 }
7866
7867 static int set_admin(bool admin)
7868 {
7869 cap_t caps;
7870 const cap_value_t cap_val = CAP_SYS_ADMIN;
7871 int ret = -1;
7872
7873 caps = cap_get_proc();
7874 if (!caps) {
7875 perror("cap_get_proc");
7876 return -1;
7877 }
7878 if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
7879 admin ? CAP_SET : CAP_CLEAR)) {
7880 perror("cap_set_flag");
7881 goto out;
7882 }
7883 if (cap_set_proc(caps)) {
7884 perror("cap_set_proc");
7885 goto out;
7886 }
7887 ret = 0;
7888 out:
7889 if (cap_free(caps))
7890 perror("cap_free");
7891 return ret;
7892 }
7893
7894 static int do_test(bool unpriv, unsigned int from, unsigned int to)
7895 {
7896 int i, passes = 0, errors = 0;
7897
7898 for (i = from; i < to; i++) {
7899 struct bpf_test *test = &tests[i];
7900
7901 /* Program types that cannot be loaded by non-root are skipped
7902  * right away for the unprivileged pass.
7903  */
7904 if (!test->prog_type) {
7905 if (!unpriv)
7906 set_admin(false);
7907 printf("#%d/u %s ", i, test->descr);
7908 do_test_single(test, true, &passes, &errors);
7909 if (!unpriv)
7910 set_admin(true);
7911 }
7912
7913 if (!unpriv) {
7914 printf("#%d/p %s ", i, test->descr);
7915 do_test_single(test, false, &passes, &errors);
7916 }
7917 }
7918
7919 printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
7920 return errors ? EXIT_FAILURE : EXIT_SUCCESS;
7921 }
7922
7923 int main(int argc, char **argv)
7924 {
7925 struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
7926 struct rlimit rlim = { 1 << 20, 1 << 20 };
7927 unsigned int from = 0, to = ARRAY_SIZE(tests);
7928 bool unpriv = !is_admin();
7929
7930 if (argc == 3) {
7931 unsigned int l = atoi(argv[argc - 2]);
7932 unsigned int u = atoi(argv[argc - 1]);
7933
7934 if (l < to && u < to) {
7935 from = l;
7936 to = u + 1;
7937 }
7938 } else if (argc == 2) {
7939 unsigned int t = atoi(argv[argc - 1]);
7940
7941 if (t < to) {
7942 from = t;
7943 to = t + 1;
7944 }
7945 }
7946
7947 setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
7948 return do_test(unpriv, from, to);
7949 }
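/*
 * Usage: with no arguments the whole suite runs.  A single numeric
 * argument runs just that test, two arguments select an inclusive range,
 * e.g. "./test_verifier 5 10".  When run as root, RLIMIT_MEMLOCK is
 * lifted and tests of the default program type are additionally run with
 * CAP_SYS_ADMIN temporarily dropped (the "#N/u" lines); an unprivileged
 * run only gets that unprivileged pass.
 */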