/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 32k insns, which may be hit even when the total
 * number of insns is less than 4K but there are too many branches that
 * change stack/regs. The number of 'branches to be analyzed' is limited to 1k.
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have UNKNOWN_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes UNKNOWN_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer
 * types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 */

/* types of values stored in eBPF registers */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	UNKNOWN_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	FRAME_PTR,		 /* reg == frame_pointer */
	PTR_TO_STACK,		 /* reg == frame_pointer + imm */
	CONST_IMM,		 /* constant integer value */
};

struct reg_state {
	enum bpf_reg_type type;
	union {
		/* valid when type == CONST_IMM | PTR_TO_STACK */
		int imm;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;
	};
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC	  /* BPF program wrote some data into this slot */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

/* state of the program:
 * type of all registers and stack info
 */
struct verifier_state {
	struct reg_state regs[MAX_BPF_REG];
	u8 stack_slot_type[MAX_BPF_STACK];
	struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
};

/* linked list of verifier states used to prune search */
struct verifier_state_list {
	struct verifier_state state;
	struct verifier_state_list *next;
};

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct verifier_stack_elem *next;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct verifier_env {
	struct bpf_prog *prog;		/* eBPF program being verified */
	struct verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	struct verifier_state cur_state; /* current verifier state */
	struct verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	bool allow_ptr_leaks;
};

/* verbose verifier prints what it's seeing
 * bpf_check() is called under lock, so no race to access these global vars
 */
static u32 log_level, log_size, log_len;
static char *log_buf;

static DEFINE_MUTEX(bpf_verifier_lock);

/* log_level controls verbosity level of eBPF verifier.
 * verbose() is used to dump the verification trace to the log, so the user
 * can figure out what's wrong with the program
 */
static __printf(1, 2) void verbose(const char *fmt, ...)
{
	va_list args;

	if (log_level == 0 || log_len >= log_size - 1)
		return;

	va_start(args, fmt);
	log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args);
	va_end(args);
}

/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[NOT_INIT]		= "?",
	[UNKNOWN_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[FRAME_PTR]		= "fp",
	[PTR_TO_STACK]		= "fp",
	[CONST_IMM]		= "imm",
};

static const struct {
	int map_type;
	int func_id;
} func_limit[] = {
	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
};

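/* dump the current verifier state (types/values of all live registers
 * and spilled stack slots) into the verifier log via verbose()
 */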
static void print_verifier_state(struct verifier_env *env)
{
	enum bpf_reg_type t;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		t = env->cur_state.regs[i].type;
		if (t == NOT_INIT)
			continue;
		verbose(" R%d=%s", i, reg_type_str[t]);
		if (t == CONST_IMM || t == PTR_TO_STACK)
			verbose("%d", env->cur_state.regs[i].imm);
		else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||
			 t == PTR_TO_MAP_VALUE_OR_NULL)
			verbose("(ks=%d,vs=%d)",
				env->cur_state.regs[i].map_ptr->key_size,
				env->cur_state.regs[i].map_ptr->value_size);
	}
	for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
		if (env->cur_state.stack_slot_type[i] == STACK_SPILL)
			verbose(" fp%d=%s", -MAX_BPF_STACK + i,
				reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]);
	}
	verbose("\n");
}

static const char *const bpf_class_string[] = {
	[BPF_LD]    = "ld",
	[BPF_LDX]   = "ldx",
	[BPF_ST]    = "st",
	[BPF_STX]   = "stx",
	[BPF_ALU]   = "alu",
	[BPF_JMP]   = "jmp",
	[BPF_RET]   = "BUG",
	[BPF_ALU64] = "alu64",
};

static const char *const bpf_alu_string[16] = {
	[BPF_ADD >> 4]  = "+=",
	[BPF_SUB >> 4]  = "-=",
	[BPF_MUL >> 4]  = "*=",
	[BPF_DIV >> 4]  = "/=",
	[BPF_OR >> 4]   = "|=",
	[BPF_AND >> 4]  = "&=",
	[BPF_LSH >> 4]  = "<<=",
	[BPF_RSH >> 4]  = ">>=",
	[BPF_NEG >> 4]  = "neg",
	[BPF_MOD >> 4]  = "%=",
	[BPF_XOR >> 4]  = "^=",
	[BPF_MOV >> 4]  = "=",
	[BPF_ARSH >> 4] = "s>>=",
	[BPF_END >> 4]  = "endian",
};

static const char *const bpf_ldst_string[] = {
	[BPF_W >> 3]  = "u32",
	[BPF_H >> 3]  = "u16",
	[BPF_B >> 3]  = "u8",
	[BPF_DW >> 3] = "u64",
};

static const char *const bpf_jmp_string[16] = {
	[BPF_JA >> 4]   = "jmp",
	[BPF_JEQ >> 4]  = "==",
	[BPF_JGT >> 4]  = ">",
	[BPF_JGE >> 4]  = ">=",
	[BPF_JSET >> 4] = "&",
	[BPF_JNE >> 4]  = "!=",
	[BPF_JSGT >> 4] = "s>",
	[BPF_JSGE >> 4] = "s>=",
	[BPF_CALL >> 4] = "call",
	[BPF_EXIT >> 4] = "exit",
};

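/* pretty-print a single eBPF instruction into the verifier log,
 * using the string tables above
 */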
static void print_bpf_insn(struct bpf_insn *insn)
{
	u8 class = BPF_CLASS(insn->code);

	if (class == BPF_ALU || class == BPF_ALU64) {
		if (BPF_SRC(insn->code) == BPF_X)
			verbose("(%02x) %sr%d %s %sr%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->src_reg);
		else
			verbose("(%02x) %sr%d %s %s%d\n",
				insn->code, class == BPF_ALU ? "(u32) " : "",
				insn->dst_reg,
				bpf_alu_string[BPF_OP(insn->code) >> 4],
				class == BPF_ALU ? "(u32) " : "",
				insn->imm);
	} else if (class == BPF_STX) {
		if (BPF_MODE(insn->code) == BPF_MEM)
			verbose("(%02x) *(%s *)(r%d %+d) = r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg,
				insn->off, insn->src_reg);
		else if (BPF_MODE(insn->code) == BPF_XADD)
			verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->dst_reg, insn->off,
				insn->src_reg);
		else
			verbose("BUG_%02x\n", insn->code);
	} else if (class == BPF_ST) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_st_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) *(%s *)(r%d %+d) = %d\n",
			insn->code,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->dst_reg,
			insn->off, insn->imm);
	} else if (class == BPF_LDX) {
		if (BPF_MODE(insn->code) != BPF_MEM) {
			verbose("BUG_ldx_%02x\n", insn->code);
			return;
		}
		verbose("(%02x) r%d = *(%s *)(r%d %+d)\n",
			insn->code, insn->dst_reg,
			bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
			insn->src_reg, insn->off);
	} else if (class == BPF_LD) {
		if (BPF_MODE(insn->code) == BPF_ABS) {
			verbose("(%02x) r0 = *(%s *)skb[%d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IND) {
			verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n",
				insn->code,
				bpf_ldst_string[BPF_SIZE(insn->code) >> 3],
				insn->src_reg, insn->imm);
		} else if (BPF_MODE(insn->code) == BPF_IMM) {
			verbose("(%02x) r%d = 0x%x\n",
				insn->code, insn->dst_reg, insn->imm);
		} else {
			verbose("BUG_ld_%02x\n", insn->code);
			return;
		}
	} else if (class == BPF_JMP) {
		u8 opcode = BPF_OP(insn->code);

		if (opcode == BPF_CALL) {
			verbose("(%02x) call %d\n", insn->code, insn->imm);
		} else if (insn->code == (BPF_JMP | BPF_JA)) {
			verbose("(%02x) goto pc%+d\n",
				insn->code, insn->off);
		} else if (insn->code == (BPF_JMP | BPF_EXIT)) {
			verbose("(%02x) exit\n", insn->code);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			verbose("(%02x) if r%d %s r%d goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->src_reg, insn->off);
		} else {
			verbose("(%02x) if r%d %s 0x%x goto pc%+d\n",
				insn->code, insn->dst_reg,
				bpf_jmp_string[BPF_OP(insn->code) >> 4],
				insn->imm, insn->off);
		}
	} else {
		verbose("(%02x) %s\n", insn->code, bpf_class_string[class]);
	}
}

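/* pop the next saved state off the branch stack and make it the current
 * state; returns the instruction index to continue from, or -1 if the
 * stack is empty
 */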
static int pop_stack(struct verifier_env *env, int *prev_insn_idx)
{
	struct verifier_stack_elem *elem;
	int insn_idx;

	if (env->head == NULL)
		return -1;

	memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state));
	insn_idx = env->head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = env->head->prev_insn_idx;
	elem = env->head->next;
	kfree(env->head);
	env->head = elem;
	env->stack_size--;
	return insn_idx;
}

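/* save a copy of the current state so the other side of a branch can be
 * explored later; on allocation failure or too many pending branches the
 * whole stack is drained and NULL is returned
 */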
static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx,
					 int prev_insn_idx)
{
	struct verifier_stack_elem *elem;

	elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state));
	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	env->head = elem;
	env->stack_size++;
	if (env->stack_size > 1024) {
		verbose("BPF program is too complex\n");
		goto err;
	}
	return &elem->st;
err:
	/* pop all elements and return */
	while (pop_stack(env, NULL) >= 0);
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

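/* set the entry state of the program: all registers NOT_INIT except the
 * read-only frame pointer R10 and the context pointer in R1
 */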
static void init_reg_state(struct reg_state *regs)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		regs[i].type = NOT_INIT;
		regs[i].imm = 0;
		regs[i].map_ptr = NULL;
	}

	/* frame pointer */
	regs[BPF_REG_FP].type = FRAME_PTR;

	/* 1st arg to a function */
	regs[BPF_REG_1].type = PTR_TO_CTX;
}

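/* mark register as holding some unknown scalar value */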
static void mark_reg_unknown_value(struct reg_state *regs, u32 regno)
{
	BUG_ON(regno >= MAX_BPF_REG);
	regs[regno].type = UNKNOWN_VALUE;
	regs[regno].imm = 0;
	regs[regno].map_ptr = NULL;
}

enum reg_arg_type {
	SRC_OP,		/* register is used as source operand */
	DST_OP,		/* register is used as destination operand */
	DST_OP_NO_MARK	/* same as above, check only, don't mark */
};

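/* check that a register may legally be used as a source or destination
 * operand; DST_OP additionally marks the register as unknown
 */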
static int check_reg_arg(struct reg_state *regs, u32 regno,
			 enum reg_arg_type t)
{
	if (regno >= MAX_BPF_REG) {
		verbose("R%d is invalid\n", regno);
		return -EINVAL;
	}

	if (t == SRC_OP) {
		/* check whether register used as source operand can be read */
		if (regs[regno].type == NOT_INIT) {
			verbose("R%d !read_ok\n", regno);
			return -EACCES;
		}
	} else {
		/* check whether register used as dest operand can be written to */
		if (regno == BPF_REG_FP) {
			verbose("frame pointer is read only\n");
			return -EACCES;
		}
		if (t == DST_OP)
			mark_reg_unknown_value(regs, regno);
	}
	return 0;
}

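/* convert the BPF_W/BPF_H/BPF_B/BPF_DW size encoding into a byte count */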
static int bpf_size_to_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 8;
	else
		return -EINVAL;
}

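/* only pointer types (and the frame pointer) may be spilled to the stack
 * with their type preserved; everything else is written as STACK_MISC
 */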
static bool is_spillable_regtype(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_MAP_VALUE:
	case PTR_TO_MAP_VALUE_OR_NULL:
	case PTR_TO_STACK:
	case PTR_TO_CTX:
	case FRAME_PTR:
	case CONST_PTR_TO_MAP:
		return true;
	default:
		return false;
	}
}

/* check_stack_read/write functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
static int check_stack_write(struct verifier_state *state, int off, int size,
			     int value_regno)
{
	int i;
	/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
	 * so it's aligned access and [off, off + size) are within stack limits
	 */

	if (value_regno >= 0 &&
	    is_spillable_regtype(state->regs[value_regno].type)) {

		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}

		/* save register state */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			state->regs[value_regno];

		for (i = 0; i < BPF_REG_SIZE; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
	} else {
		/* regular write of data into stack */
		state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
			(struct reg_state) {};

		for (i = 0; i < size; i++)
			state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
	}
	return 0;
}

static int check_stack_read(struct verifier_state *state, int off, int size,
			    int value_regno)
{
	u8 *slot_type;
	int i;

	slot_type = &state->stack_slot_type[MAX_BPF_STACK + off];

	if (slot_type[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose("invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (slot_type[i] != STACK_SPILL) {
				verbose("corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0)
			/* restore register state from stack */
			state->regs[value_regno] =
				state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE];
		return 0;
	} else {
		for (i = 0; i < size; i++) {
			if (slot_type[i] != STACK_MISC) {
				verbose("invalid read from stack off %d+%d size %d\n",
					off, i, size);
				return -EACCES;
			}
		}
		if (value_regno >= 0)
			/* have read misc data from the stack */
			mark_reg_unknown_value(state->regs, value_regno);
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int check_map_access(struct verifier_env *env, u32 regno, int off,
			    int size)
{
	struct bpf_map *map = env->cur_state.regs[regno].map_ptr;

	if (off < 0 || off + size > map->value_size) {
		verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check access to 'struct bpf_context' fields */
static int check_ctx_access(struct verifier_env *env, int off, int size,
			    enum bpf_access_type t)
{
	if (env->prog->aux->ops->is_valid_access &&
	    env->prog->aux->ops->is_valid_access(off, size, t))
		return 0;

	verbose("invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static bool is_pointer_value(struct verifier_env *env, int regno)
{
	if (env->allow_ptr_leaks)
		return false;

	switch (env->cur_state.regs[regno].type) {
	case UNKNOWN_VALUE:
	case CONST_IMM:
		return false;
	default:
		return true;
	}
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct verifier_env *env, u32 regno, int off,
			    int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct verifier_state *state = &env->cur_state;
	int size, err = 0;

	if (state->regs[regno].type == PTR_TO_STACK)
		off += state->regs[regno].imm;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	if (off % size != 0) {
		verbose("misaligned access off %d size %d\n", off, size);
		return -EACCES;
	}

	if (state->regs[regno].type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access(env, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == PTR_TO_CTX) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		err = check_ctx_access(env, off, size, t);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown_value(state->regs, value_regno);

	} else if (state->regs[regno].type == FRAME_PTR ||
		   state->regs[regno].type == PTR_TO_STACK) {
		if (off >= 0 || off < -MAX_BPF_STACK) {
			verbose("invalid stack off=%d size=%d\n", off, size);
			return -EACCES;
		}
		if (t == BPF_WRITE) {
			if (!env->allow_ptr_leaks &&
			    state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL &&
			    size != BPF_REG_SIZE) {
				verbose("attempt to corrupt spilled pointer on stack\n");
				return -EACCES;
			}
			err = check_stack_write(state, off, size, value_regno);
		} else {
			err = check_stack_read(state, off, size, value_regno);
		}
	} else {
		verbose("R%d invalid mem access '%s'\n",
			regno, reg_type_str[state->regs[regno].type]);
		return -EACCES;
	}
	return err;
}

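/* BPF_XADD atomically adds src_reg to the memory at (dst_reg + off),
 * so that location must be both readable and writeable
 */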
static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
	    insn->imm != 0) {
		verbose("BPF_XADD uses reserved fields\n");
		return -EINVAL;
	}

	/* check src1 operand */
	err = check_reg_arg(regs, insn->src_reg, SRC_OP);
	if (err)
		return err;

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* check whether atomic_add can read the memory */
	err = check_mem_access(env, insn->dst_reg, insn->off,
			       BPF_SIZE(insn->code), BPF_READ, -1);
	if (err)
		return err;

	/* check whether atomic_add can write into the same memory */
	return check_mem_access(env, insn->dst_reg, insn->off,
				BPF_SIZE(insn->code), BPF_WRITE, -1);
}

/* when register 'regno' is passed into function that will read 'access_size'
 * bytes from that pointer, make sure that it's within stack boundary
 * and all elements of stack are initialized
 */
static int check_stack_boundary(struct verifier_env *env,
				int regno, int access_size)
{
	struct verifier_state *state = &env->cur_state;
	struct reg_state *regs = state->regs;
	int off, i;

	if (regs[regno].type != PTR_TO_STACK)
		return -EACCES;

	off = regs[regno].imm;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size <= 0) {
		verbose("invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	for (i = 0; i < access_size; i++) {
		if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) {
			verbose("invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

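/* check a single helper-function argument against its declared
 * bpf_arg_type; an ARG_CONST_MAP_PTR argument is remembered in '*mapp'
 * so later key/value arguments can be sized against that map
 */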
static int check_func_arg(struct verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type, struct bpf_map **mapp)
{
	struct reg_state *reg = env->cur_state.regs + regno;
	enum bpf_reg_type expected_type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	if (reg->type == NOT_INIT) {
		verbose("R%d !read_ok\n", regno);
		return -EACCES;
	}

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose("R%d leaks addr into helper function\n", regno);
			return -EACCES;
		}
		return 0;
	}

	if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
	} else if (arg_type == ARG_CONST_STACK_SIZE) {
		expected_type = CONST_IMM;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
	} else {
		verbose("unsupported arg_type %d\n", arg_type);
		return -EFAULT;
	}

	if (reg->type != expected_type) {
		verbose("R%d type=%s expected=%s\n", regno,
			reg_type_str[reg->type], reg_type_str[expected_type]);
		return -EACCES;
	}

	if (arg_type == ARG_CONST_MAP_PTR) {
		/* bpf_map_xxx(map_ptr) call: remember that map_ptr */
		*mapp = reg->map_ptr;

	} else if (arg_type == ARG_PTR_TO_MAP_KEY) {
		/* bpf_map_xxx(..., map_ptr, ..., key) call:
		 * check that [key, key + map->key_size) are within
		 * stack limits and initialized
		 */
		if (!*mapp) {
			/* in function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that kernel subsystem misconfigured verifier
			 */
			verbose("invalid map_ptr to access map->key\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->key_size);

	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
		/* bpf_map_xxx(..., map_ptr, ..., value) call:
		 * check [value, value + map->value_size) validity
		 */
		if (!*mapp) {
			/* kernel subsystem misconfigured verifier */
			verbose("invalid map_ptr to access map->value\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno, (*mapp)->value_size);

	} else if (arg_type == ARG_CONST_STACK_SIZE) {
		/* bpf_xxx(..., buf, len) call will access 'len' bytes
		 * from stack pointer 'buf'. Check it
		 * note: regno == len, regno - 1 == buf
		 */
		if (regno == 0) {
			/* kernel subsystem misconfigured verifier */
			verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
			return -EACCES;
		}
		err = check_stack_boundary(env, regno - 1, reg->imm);
	}

	return err;
}

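/* certain helpers listed in func_limit[] may only be called with their
 * matching map type
 */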
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
	bool bool_map, bool_func;
	int i;

	if (!map)
		return 0;

	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
		bool_map = (map->map_type == func_limit[i].map_type);
		bool_func = (func_id == func_limit[i].func_id);
		/* continue only when the map & func pair match;
		 * don't allow any other map type to be passed into
		 * the special func.
		 */
		if (bool_func && bool_map != bool_func)
			return -EINVAL;
	}

	return 0;
}

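/* verify a BPF_CALL instruction: look up the helper prototype, check the
 * five argument registers against it, scratch the caller-saved registers
 * and set the type of R0 from the helper's return type
 */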
static int check_call(struct verifier_env *env, int func_id)
{
	struct verifier_state *state = &env->cur_state;
	const struct bpf_func_proto *fn = NULL;
	struct reg_state *regs = state->regs;
	struct bpf_map *map = NULL;
	struct reg_state *reg;
	int i, err;

	/* find function prototype */
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose("invalid func %d\n", func_id);
		return -EINVAL;
	}

	if (env->prog->aux->ops->get_func_proto)
		fn = env->prog->aux->ops->get_func_proto(func_id);

	if (!fn) {
		verbose("unknown func %d\n", func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose("cannot call GPL only function from proprietary program\n");
		return -EINVAL;
	}

	/* check args */
	err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map);
	if (err)
		return err;
	err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map);
	if (err)
		return err;

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* update return register */
	if (fn->ret_type == RET_INTEGER) {
		regs[BPF_REG_0].type = UNKNOWN_VALUE;
	} else if (fn->ret_type == RET_VOID) {
		regs[BPF_REG_0].type = NOT_INIT;
	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (map == NULL) {
			verbose("kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = map;
	} else {
		verbose("unknown return type %d of func %d\n",
			fn->ret_type, func_id);
		return -EINVAL;
	}

	err = check_map_func_compatibility(map, func_id);
	if (err)
		return err;

	return 0;
}

/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode == BPF_END || opcode == BPF_NEG) {
		if (opcode == BPF_NEG) {
			if (BPF_SRC(insn->code) != 0 ||
			    insn->src_reg != BPF_REG_0 ||
			    insn->off != 0 || insn->imm != 0) {
				verbose("BPF_NEG uses reserved fields\n");
				return -EINVAL;
			}
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
				verbose("BPF_END uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

	} else if (opcode == BPF_MOV) {

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}

			/* check src operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_MOV uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (BPF_CLASS(insn->code) == BPF_ALU64) {
				/* case: R1 = R2
				 * copy register state to dest reg
				 */
				regs[insn->dst_reg] = regs[insn->src_reg];
			} else {
				if (is_pointer_value(env, insn->src_reg)) {
					verbose("R%d partial copy of pointer\n",
						insn->src_reg);
					return -EACCES;
				}
				regs[insn->dst_reg].type = UNKNOWN_VALUE;
				regs[insn->dst_reg].map_ptr = NULL;
			}
		} else {
			/* case: R = imm
			 * remember the value we stored into this reg
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}

	} else if (opcode > BPF_END) {
		verbose("invalid BPF_ALU opcode %x\n", opcode);
		return -EINVAL;

	} else {	/* all other ALU ops: and, sub, xor, add, ... */

		bool stack_relative = false;

		if (BPF_SRC(insn->code) == BPF_X) {
			if (insn->imm != 0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
			/* check src1 operand */
			err = check_reg_arg(regs, insn->src_reg, SRC_OP);
			if (err)
				return err;
		} else {
			if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
				verbose("BPF_ALU uses reserved fields\n");
				return -EINVAL;
			}
		}

		/* check src2 operand */
		err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
		if (err)
			return err;

		if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
		    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
			verbose("div by zero\n");
			return -EINVAL;
		}

		/* pattern match 'bpf_add Rx, imm' instruction */
		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
		    regs[insn->dst_reg].type == FRAME_PTR &&
		    BPF_SRC(insn->code) == BPF_K) {
			stack_relative = true;
		} else if (is_pointer_value(env, insn->dst_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->dst_reg);
			return -EACCES;
		} else if (BPF_SRC(insn->code) == BPF_X &&
			   is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer arithmetic prohibited\n",
				insn->src_reg);
			return -EACCES;
		}

		/* check dest operand */
		err = check_reg_arg(regs, insn->dst_reg, DST_OP);
		if (err)
			return err;

		if (stack_relative) {
			regs[insn->dst_reg].type = PTR_TO_STACK;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}

	return 0;
}

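/* verify a conditional jump: push the other branch onto the state stack
 * for later exploration and refine register knowledge in both branches
 * (in particular the NULL check of a PTR_TO_MAP_VALUE_OR_NULL register)
 */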
static int check_cond_jmp_op(struct verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct reg_state *regs = env->cur_state.regs;
	struct verifier_state *other_branch;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_EXIT) {
		verbose("invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose("R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose("BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	/* detect if R == 0 where R was initialized to zero earlier */
	if (BPF_SRC(insn->code) == BPF_K &&
	    (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == CONST_IMM &&
	    regs[insn->dst_reg].imm == insn->imm) {
		if (opcode == BPF_JEQ) {
			/* if (imm == imm) goto pc+off;
			 * only follow the goto, ignore fall-through
			 */
			*insn_idx += insn->off;
			return 0;
		} else {
			/* if (imm != imm) goto pc+off;
			 * only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;

	/* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ ||
			       opcode == BPF_JNE) &&
	    regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) {
		if (opcode == BPF_JEQ) {
			/* next fallthrough insn can access memory via
			 * this register
			 */
			regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			/* branch target cannot access it, since reg == 0 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = 0;
		} else {
			other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = 0;
		}
	} else if (is_pointer_value(env, insn->dst_reg)) {
		verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
		return -EACCES;
	} else if (BPF_SRC(insn->code) == BPF_K &&
		   (opcode == BPF_JEQ || opcode == BPF_JNE)) {

		if (opcode == BPF_JEQ) {
			/* detect if (R == imm) goto
			 * and in the target state recognize that R = imm
			 */
			other_branch->regs[insn->dst_reg].type = CONST_IMM;
			other_branch->regs[insn->dst_reg].imm = insn->imm;
		} else {
			/* detect if (R != imm) goto
			 * and in the fall-through state recognize that R = imm
			 */
			regs[insn->dst_reg].type = CONST_IMM;
			regs[insn->dst_reg].imm = insn->imm;
		}
	}
	if (log_level)
		print_verifier_state(env);
	return 0;
}

/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose("invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose("BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(regs, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0)
		/* generic move 64-bit immediate into a register */
		return 0;

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}

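/* only socket filters and TC classifier/action programs have an skb ctx */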
static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
{
	struct reg_state *regs = env->cur_state.regs;
	u8 mode = BPF_MODE(insn->code);
	struct reg_state *reg;
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose("BPF_LD_ABS uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(regs, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(regs, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		reg = regs + caller_saved[i];
		reg->type = NOT_INIT;
		reg->imm = 0;
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet
	 */
	regs[BPF_REG_0].type = UNKNOWN_VALUE;
	return 0;
}

/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue at 5
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};

#define STATE_LIST_MARK ((struct verifier_state_list *) -1L)

static int *insn_stack;	/* stack of insns to process */
static int cur_stack;	/* current stack index */
static int *insn_state;

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct verifier_env *env)
{
	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
		return 0;

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
		return 0;

	if (w < 0 || w >= env->prog->len) {
		verbose("jump out of range from insn %d to %d\n", t, w);
		return -EINVAL;
	}

	if (e == BRANCH)
		/* mark branch target for state pruning */
		env->explored_states[w] = STATE_LIST_MARK;

	if (insn_state[w] == 0) {
		/* tree-edge */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (cur_stack >= env->prog->len)
			return -E2BIG;
		insn_stack[cur_stack++] = w;
		return 1;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		verbose("back-edge from insn %d to %d\n", t, w);
		return -EINVAL;
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;
	} else {
		verbose("insn state internal bug\n");
		return -EFAULT;
	}
	return 0;
}

/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int ret = 0;
	int i, t;

	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose("pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose("unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (memcmp(&old->regs[i], &cur->regs[i],
			   sizeof(old->regs[0])) != 0) {
			if (old->regs[i].type == NOT_INIT ||
			    (old->regs[i].type == UNKNOWN_VALUE &&
			     cur->regs[i].type != NOT_INIT))
				continue;
			return false;
		}
	}

	for (i = 0; i < MAX_BPF_STACK; i++) {
		if (old->stack_slot_type[i] == STACK_INVALID)
			continue;
		if (old->stack_slot_type[i] != cur->stack_slot_type[i])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE],
			   &cur->spilled_regs[i / BPF_REG_SIZE],
			   sizeof(old->spilled_regs[0])))
			/* when explored and current stack slot types are
			 * the same, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8}
			 * but current path has stored:
			 * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
		else
			continue;
	}
	return true;
}

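/* if an equivalent state was already explored at this instruction, the
 * current path is known to be safe and the search can be pruned;
 * otherwise remember the current state for future comparisons
 */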
static int is_state_visited(struct verifier_env *env, int insn_idx)
{
	struct verifier_state_list *new_sl;
	struct verifier_state_list *sl;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(&sl->state, &env->cur_state))
			/* reached equivalent register/stack state,
			 * prune the search
			 */
			return 1;
		sl = sl->next;
	}

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach bpf_exit (which means it's safe) or
	 * it will be rejected. Since there are no loops, we won't be
	 * seeing this 'insn_idx' instruction again on the way to bpf_exit
	 */
	new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER);
	if (!new_sl)
		return -ENOMEM;

	/* add new state to the head of linked list */
	memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state));
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	return 0;
}

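/* main verification loop: walk the program instruction by instruction,
 * maintaining the register/stack state and exploring both sides of every
 * conditional branch until all paths reach bpf_exit
 */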
1684 static int do_check(struct verifier_env *env)
1685 {
1686 struct verifier_state *state = &env->cur_state;
1687 struct bpf_insn *insns = env->prog->insnsi;
1688 struct reg_state *regs = state->regs;
1689 int insn_cnt = env->prog->len;
1690 int insn_idx, prev_insn_idx = 0;
1691 int insn_processed = 0;
1692 bool do_print_state = false;
1693
1694 init_reg_state(regs);
1695 insn_idx = 0;
1696 for (;;) {
1697 struct bpf_insn *insn;
1698 u8 class;
1699 int err;
1700
1701 if (insn_idx >= insn_cnt) {
1702 verbose("invalid insn idx %d insn_cnt %d\n",
1703 insn_idx, insn_cnt);
1704 return -EFAULT;
1705 }
1706
1707 insn = &insns[insn_idx];
1708 class = BPF_CLASS(insn->code);
1709
1710 if (++insn_processed > 32768) {
1711 verbose("BPF program is too large. Proccessed %d insn\n",
1712 insn_processed);
1713 return -E2BIG;
1714 }
1715
1716 err = is_state_visited(env, insn_idx);
1717 if (err < 0)
1718 return err;
1719 if (err == 1) {
1720 /* found equivalent state, can prune the search */
1721 if (log_level) {
1722 if (do_print_state)
1723 verbose("\nfrom %d to %d: safe\n",
1724 prev_insn_idx, insn_idx);
1725 else
1726 verbose("%d: safe\n", insn_idx);
1727 }
1728 goto process_bpf_exit;
1729 }
1730
1731 if (log_level && do_print_state) {
1732 verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
1733 print_verifier_state(env);
1734 do_print_state = false;
1735 }
1736
1737 if (log_level) {
1738 verbose("%d: ", insn_idx);
1739 print_bpf_insn(insn);
1740 }
1741
1742 if (class == BPF_ALU || class == BPF_ALU64) {
1743 err = check_alu_op(env, insn);
1744 if (err)
1745 return err;
1746
1747 } else if (class == BPF_LDX) {
1748 enum bpf_reg_type src_reg_type;
1749
1750 /* check for reserved fields is already done */
1751
1752 /* check src operand */
1753 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1754 if (err)
1755 return err;
1756
1757 err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK);
1758 if (err)
1759 return err;
1760
1761 src_reg_type = regs[insn->src_reg].type;
1762
1763 /* check that memory (src_reg + off) is readable,
1764 * the state of dst_reg will be updated by this func
1765 */
1766 err = check_mem_access(env, insn->src_reg, insn->off,
1767 BPF_SIZE(insn->code), BPF_READ,
1768 insn->dst_reg);
1769 if (err)
1770 return err;
1771
1772 if (BPF_SIZE(insn->code) != BPF_W) {
1773 insn_idx++;
1774 continue;
1775 }
1776
1777 if (insn->imm == 0) {
1778 /* saw a valid insn
1779 * dst_reg = *(u32 *)(src_reg + off)
1780 * use reserved 'imm' field to mark this insn
1781 */
1782 insn->imm = src_reg_type;
1783
1784 } else if (src_reg_type != insn->imm &&
1785 (src_reg_type == PTR_TO_CTX ||
1786 insn->imm == PTR_TO_CTX)) {
1787 /* ABuser program is trying to use the same insn
1788 * dst_reg = *(u32*) (src_reg + off)
1789 * with different pointer types:
1790 * src_reg == ctx in one branch and
1791 * src_reg == stack|map in some other branch.
1792 * Reject it.
1793 */
1794 verbose("same insn cannot be used with different pointers\n");
1795 return -EINVAL;
1796 }
1797
1798 } else if (class == BPF_STX) {
1799 enum bpf_reg_type dst_reg_type;
1800
1801 if (BPF_MODE(insn->code) == BPF_XADD) {
1802 err = check_xadd(env, insn);
1803 if (err)
1804 return err;
1805 insn_idx++;
1806 continue;
1807 }
1808
1809 /* check src1 operand */
1810 err = check_reg_arg(regs, insn->src_reg, SRC_OP);
1811 if (err)
1812 return err;
1813 /* check src2 operand */
1814 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1815 if (err)
1816 return err;
1817
1818 dst_reg_type = regs[insn->dst_reg].type;
1819
1820 /* check that memory (dst_reg + off) is writeable */
1821 err = check_mem_access(env, insn->dst_reg, insn->off,
1822 BPF_SIZE(insn->code), BPF_WRITE,
1823 insn->src_reg);
1824 if (err)
1825 return err;
1826
1827 if (insn->imm == 0) {
1828 insn->imm = dst_reg_type;
1829 } else if (dst_reg_type != insn->imm &&
1830 (dst_reg_type == PTR_TO_CTX ||
1831 insn->imm == PTR_TO_CTX)) {
1832 verbose("same insn cannot be used with different pointers\n");
1833 return -EINVAL;
1834 }
1835
1836 } else if (class == BPF_ST) {
1837 if (BPF_MODE(insn->code) != BPF_MEM ||
1838 insn->src_reg != BPF_REG_0) {
1839 verbose("BPF_ST uses reserved fields\n");
1840 return -EINVAL;
1841 }
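/* Illustrative example (added): BPF_ST stores an immediate, e.g.
 *   BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0)
 * writes the 32-bit constant 0 to fp-8. There is no source register
 * to track, hence the -1 passed to check_mem_access() below in place
 * of a value register.
 */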
1842 /* check src operand */
1843 err = check_reg_arg(regs, insn->dst_reg, SRC_OP);
1844 if (err)
1845 return err;
1846
1847 /* check that memory (dst_reg + off) is writeable */
1848 err = check_mem_access(env, insn->dst_reg, insn->off,
1849 BPF_SIZE(insn->code), BPF_WRITE,
1850 -1);
1851 if (err)
1852 return err;
1853
1854 } else if (class == BPF_JMP) {
1855 u8 opcode = BPF_OP(insn->code);
1856
1857 if (opcode == BPF_CALL) {
1858 if (BPF_SRC(insn->code) != BPF_K ||
1859 insn->off != 0 ||
1860 insn->src_reg != BPF_REG_0 ||
1861 insn->dst_reg != BPF_REG_0) {
1862 verbose("BPF_CALL uses reserved fields\n");
1863 return -EINVAL;
1864 }
1865
1866 err = check_call(env, insn->imm);
1867 if (err)
1868 return err;
1869
1870 } else if (opcode == BPF_JA) {
1871 if (BPF_SRC(insn->code) != BPF_K ||
1872 insn->imm != 0 ||
1873 insn->src_reg != BPF_REG_0 ||
1874 insn->dst_reg != BPF_REG_0) {
1875 verbose("BPF_JA uses reserved fields\n");
1876 return -EINVAL;
1877 }
1878
1879 insn_idx += insn->off + 1;
1880 continue;
1881
1882 } else if (opcode == BPF_EXIT) {
1883 if (BPF_SRC(insn->code) != BPF_K ||
1884 insn->imm != 0 ||
1885 insn->src_reg != BPF_REG_0 ||
1886 insn->dst_reg != BPF_REG_0) {
1887 verbose("BPF_EXIT uses reserved fields\n");
1888 return -EINVAL;
1889 }
1890
1891 /* eBPF calling convention is such that R0 is used
1892 * to return the value from the eBPF program.
1893 * Make sure that it's readable at the time of
1894 * bpf_exit, which means that the program wrote
1895 * something into it earlier
1896 */
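/* A minimal valid epilogue (illustrative, added):
 *   BPF_MOV64_IMM(BPF_REG_0, 0),  // make R0 readable
 *   BPF_EXIT_INSN(),
 * Exiting without ever writing R0 is rejected by the check below.
 */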
1897 err = check_reg_arg(regs, BPF_REG_0, SRC_OP);
1898 if (err)
1899 return err;
1900
1901 if (is_pointer_value(env, BPF_REG_0)) {
1902 verbose("R0 leaks addr as return value\n");
1903 return -EACCES;
1904 }
1905
1906 process_bpf_exit:
1907 insn_idx = pop_stack(env, &prev_insn_idx);
1908 if (insn_idx < 0) {
1909 break;
1910 } else {
1911 do_print_state = true;
1912 continue;
1913 }
1914 } else {
1915 err = check_cond_jmp_op(env, insn, &insn_idx);
1916 if (err)
1917 return err;
1918 }
1919 } else if (class == BPF_LD) {
1920 u8 mode = BPF_MODE(insn->code);
1921
1922 if (mode == BPF_ABS || mode == BPF_IND) {
1923 err = check_ld_abs(env, insn);
1924 if (err)
1925 return err;
1926
1927 } else if (mode == BPF_IMM) {
1928 err = check_ld_imm(env, insn);
1929 if (err)
1930 return err;
1931
1932 insn_idx++;
1933 } else {
1934 verbose("invalid BPF_LD mode\n");
1935 return -EINVAL;
1936 }
1937 } else {
1938 verbose("unknown insn class %d\n", class);
1939 return -EINVAL;
1940 }
1941
1942 insn_idx++;
1943 }
1944
1945 return 0;
1946 }
1947
1948 /* look for pseudo eBPF instructions that access map FDs and
1949 * replace them with actual map pointers
1950 */
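/* Illustrative example (added): userspace emits the pseudo insn as a
 * two-slot BPF_LD_IMM64 whose src_reg carries the BPF_PSEUDO_MAP_FD
 * marker and whose imm holds the map fd, e.g. via the
 * BPF_LD_MAP_FD(BPF_REG_1, map_fd) macro, which expands to:
 *   insn[0] = { .code = BPF_LD | BPF_DW | BPF_IMM,
 *               .src_reg = BPF_PSEUDO_MAP_FD, .imm = map_fd }
 *   insn[1] = { .imm = 0 }   // upper 32 bits, unused for an fd
 */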
1951 static int replace_map_fd_with_map_ptr(struct verifier_env *env)
1952 {
1953 struct bpf_insn *insn = env->prog->insnsi;
1954 int insn_cnt = env->prog->len;
1955 int i, j;
1956
1957 for (i = 0; i < insn_cnt; i++, insn++) {
1958 if (BPF_CLASS(insn->code) == BPF_LDX &&
1959 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
1960 verbose("BPF_LDX uses reserved fields\n");
1961 return -EINVAL;
1962 }
1963
1964 if (BPF_CLASS(insn->code) == BPF_STX &&
1965 ((BPF_MODE(insn->code) != BPF_MEM &&
1966 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
1967 verbose("BPF_STX uses reserved fields\n");
1968 return -EINVAL;
1969 }
1970
1971 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
1972 struct bpf_map *map;
1973 struct fd f;
1974
1975 if (i == insn_cnt - 1 || insn[1].code != 0 ||
1976 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
1977 insn[1].off != 0) {
1978 verbose("invalid bpf_ld_imm64 insn\n");
1979 return -EINVAL;
1980 }
1981
1982 if (insn->src_reg == 0)
1983 /* valid generic load 64-bit imm */
1984 goto next_insn;
1985
1986 if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
1987 verbose("unrecognized bpf_ld_imm64 insn\n");
1988 return -EINVAL;
1989 }
1990
1991 f = fdget(insn->imm);
1992 map = __bpf_map_get(f);
1993 if (IS_ERR(map)) {
1994 verbose("fd %d is not pointing to valid bpf_map\n",
1995 insn->imm);
1996 fdput(f);
1997 return PTR_ERR(map);
1998 }
1999
2000 /* store map pointer inside BPF_LD_IMM64 instruction */
2001 insn[0].imm = (u32) (unsigned long) map;
2002 insn[1].imm = ((u64) (unsigned long) map) >> 32;
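/* Added note: the two 32-bit imm fields together hold the 64-bit
 * pointer; consumers of the ld_imm64 reassemble it as
 *   (u64)(u32)insn[0].imm | ((u64)insn[1].imm << 32)
 */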
2003
2004 /* check whether we recorded this map already */
2005 for (j = 0; j < env->used_map_cnt; j++)
2006 if (env->used_maps[j] == map) {
2007 fdput(f);
2008 goto next_insn;
2009 }
2010
2011 if (env->used_map_cnt >= MAX_USED_MAPS) {
2012 fdput(f);
2013 return -E2BIG;
2014 }
2015
2016 /* remember this map */
2017 env->used_maps[env->used_map_cnt++] = map;
2018
2019 /* hold the map. If the program is rejected by the verifier,
2020 * the map will be released by release_maps() or it
2021 * will be used by the valid program until it's unloaded
2022 * and all maps are released in free_bpf_prog_info()
2023 */
2024 atomic_inc(&map->refcnt);
2025
2026 fdput(f);
2027 next_insn:
2028 insn++;
2029 i++;
2030 }
2031 }
2032
2033 /* now all pseudo BPF_LD_IMM64 instructions load valid
2034 * 'struct bpf_map *' into a register instead of user map_fd.
2035 * These pointers will be used later by the verifier to validate map access.
2036 */
2037 return 0;
2038 }
2039
2040 /* drop refcnt of maps used by the rejected program */
2041 static void release_maps(struct verifier_env *env)
2042 {
2043 int i;
2044
2045 for (i = 0; i < env->used_map_cnt; i++)
2046 bpf_map_put(env->used_maps[i]);
2047 }
2048
2049 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
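/* Added illustration: after conversion the pair looks like a plain
 * 64-bit immediate load, e.g.
 *   insn[0] = { .code = BPF_LD | BPF_DW | BPF_IMM, .src_reg = 0,
 *               .imm = lower 32 bits of the map pointer }
 *   insn[1] = { .imm = upper 32 bits }
 * so the interpreter and JITs need no special map-fd handling.
 */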
2050 static void convert_pseudo_ld_imm64(struct verifier_env *env)
2051 {
2052 struct bpf_insn *insn = env->prog->insnsi;
2053 int insn_cnt = env->prog->len;
2054 int i;
2055
2056 for (i = 0; i < insn_cnt; i++, insn++)
2057 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
2058 insn->src_reg = 0;
2059 }
2060
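/* Fix up jump offsets after 'delta' insns were inserted at position
 * 'pos' (added comment). Worked example: with delta == 2 inserted at
 * pos == 10, a jump at insn 5 with off == 6 targeted insn 12
 * (i + off + 1); the target moved but the jump did not, so off
 * becomes 8 to keep pointing at the same logical insn.
 */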
2061 static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
2062 {
2063 struct bpf_insn *insn = prog->insnsi;
2064 int insn_cnt = prog->len;
2065 int i;
2066
2067 for (i = 0; i < insn_cnt; i++, insn++) {
2068 if (BPF_CLASS(insn->code) != BPF_JMP ||
2069 BPF_OP(insn->code) == BPF_CALL ||
2070 BPF_OP(insn->code) == BPF_EXIT)
2071 continue;
2072
2073 /* adjust offset of jmps if necessary */
2074 if (i < pos && i + insn->off + 1 > pos)
2075 insn->off += delta;
2076 else if (i > pos && i + insn->off + 1 < pos)
2077 insn->off -= delta;
2078 }
2079 }
2080
2081 /* convert load instructions that access fields of 'struct __sk_buff'
2082 * into a sequence of instructions that access fields of 'struct sk_buff'
2083 */
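/* Illustrative sketch (added; the exact insns come from the
 * prog-type-specific convert_ctx_access callback): a verified access
 *   r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 * is typically rewritten into the equivalent load from the real
 * socket buffer, roughly
 *   r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 */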
2084 static int convert_ctx_accesses(struct verifier_env *env)
2085 {
2086 struct bpf_insn *insn = env->prog->insnsi;
2087 int insn_cnt = env->prog->len;
2088 struct bpf_insn insn_buf[16];
2089 struct bpf_prog *new_prog;
2090 u32 cnt;
2091 int i;
2092 enum bpf_access_type type;
2093
2094 if (!env->prog->aux->ops->convert_ctx_access)
2095 return 0;
2096
2097 for (i = 0; i < insn_cnt; i++, insn++) {
2098 if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
2099 type = BPF_READ;
2100 else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
2101 type = BPF_WRITE;
2102 else
2103 continue;
2104
2105 if (insn->imm != PTR_TO_CTX) {
2106 /* clear internal mark */
2107 insn->imm = 0;
2108 continue;
2109 }
2110
2111 cnt = env->prog->aux->ops->
2112 convert_ctx_access(type, insn->dst_reg, insn->src_reg,
2113 insn->off, insn_buf, env->prog);
2114 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
2115 verbose("bpf verifier is misconfigured\n");
2116 return -EINVAL;
2117 }
2118
2119 if (cnt == 1) {
2120 memcpy(insn, insn_buf, sizeof(*insn));
2121 continue;
2122 }
2123
2124 /* several new insns need to be inserted. Make room for them */
2125 insn_cnt += cnt - 1;
2126 new_prog = bpf_prog_realloc(env->prog,
2127 bpf_prog_size(insn_cnt),
2128 GFP_USER);
2129 if (!new_prog)
2130 return -ENOMEM;
2131
2132 new_prog->len = insn_cnt;
2133
2134 memmove(new_prog->insnsi + i + cnt, new_prog->insnsi + i + 1,
2135 sizeof(*insn) * (insn_cnt - i - cnt));
2136
2137 /* copy substitute insns in place of load instruction */
2138 memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt);
2139
2140 /* adjust branches in the whole program */
2141 adjust_branches(new_prog, i, cnt - 1);
2142
2143 /* keep walking new program and skip insns we just inserted */
2144 env->prog = new_prog;
2145 insn = new_prog->insnsi + i + cnt - 1;
2146 i += cnt - 1;
2147 }
2148
2149 return 0;
2150 }
2151
2152 static void free_states(struct verifier_env *env)
2153 {
2154 struct verifier_state_list *sl, *sln;
2155 int i;
2156
2157 if (!env->explored_states)
2158 return;
2159
2160 for (i = 0; i < env->prog->len; i++) {
2161 sl = env->explored_states[i];
2162
2163 if (sl)
2164 while (sl != STATE_LIST_MARK) {
2165 sln = sl->next;
2166 kfree(sl);
2167 sl = sln;
2168 }
2169 }
2170
2171 kfree(env->explored_states);
2172 }
2173
2174 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
2175 {
2176 char __user *log_ubuf = NULL;
2177 struct verifier_env *env;
2178 int ret = -EINVAL;
2179
2180 if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS)
2181 return -E2BIG;
2182
2183 /* 'struct verifier_env' can be global, but since it's not small,
2184 * allocate/free it every time bpf_check() is called
2185 */
2186 env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL);
2187 if (!env)
2188 return -ENOMEM;
2189
2190 env->prog = *prog;
2191
2192 /* grab the mutex to protect a few globals used by the verifier */
2193 mutex_lock(&bpf_verifier_lock);
2194
2195 if (attr->log_level || attr->log_buf || attr->log_size) {
2196 /* user requested verbose verifier output
2197 * and supplied buffer to store the verification trace
2198 */
2199 log_level = attr->log_level;
2200 log_ubuf = (char __user *) (unsigned long) attr->log_buf;
2201 log_size = attr->log_size;
2202 log_len = 0;
2203
2204 ret = -EINVAL;
2205 /* log_* values have to be sane */
2206 if (log_size < 128 || log_size > UINT_MAX >> 8 ||
2207 log_level == 0 || log_ubuf == NULL)
2208 goto free_env;
2209
2210 ret = -ENOMEM;
2211 log_buf = vmalloc(log_size);
2212 if (!log_buf)
2213 goto free_env;
2214 } else {
2215 log_level = 0;
2216 }
2217
2218 ret = replace_map_fd_with_map_ptr(env);
2219 if (ret < 0)
2220 goto skip_full_check;
2221
2222 env->explored_states = kcalloc(env->prog->len,
2223 sizeof(struct verifier_state_list *),
2224 GFP_USER);
2225 ret = -ENOMEM;
2226 if (!env->explored_states)
2227 goto skip_full_check;
2228
2229 ret = check_cfg(env);
2230 if (ret < 0)
2231 goto skip_full_check;
2232
2233 env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
2234
2235 ret = do_check(env);
2236
2237 skip_full_check:
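/* Added note: on an early failure the branch stack may still hold
 * unexplored states; pop and discard them all before freeing states.
 */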
2238 while (pop_stack(env, NULL) >= 0);
2239 free_states(env);
2240
2241 if (ret == 0)
2242 /* program is valid, convert *(u32*)(ctx + off) accesses */
2243 ret = convert_ctx_accesses(env);
2244
2245 if (log_level && log_len >= log_size - 1) {
2246 BUG_ON(log_len >= log_size);
2247 /* verifier log exceeded user supplied buffer */
2248 ret = -ENOSPC;
2249 /* fall through to return what was recorded */
2250 }
2251
2252 /* copy verifier log back to user space including trailing zero */
2253 if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) {
2254 ret = -EFAULT;
2255 goto free_log_buf;
2256 }
2257
2258 if (ret == 0 && env->used_map_cnt) {
2259 /* if program passed verifier, update used_maps in bpf_prog_info */
2260 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
2261 sizeof(env->used_maps[0]),
2262 GFP_KERNEL);
2263
2264 if (!env->prog->aux->used_maps) {
2265 ret = -ENOMEM;
2266 goto free_log_buf;
2267 }
2268
2269 memcpy(env->prog->aux->used_maps, env->used_maps,
2270 sizeof(env->used_maps[0]) * env->used_map_cnt);
2271 env->prog->aux->used_map_cnt = env->used_map_cnt;
2272
2273 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
2274 * bpf_ld_imm64 instructions
2275 */
2276 convert_pseudo_ld_imm64(env);
2277 }
2278
2279 free_log_buf:
2280 if (log_level)
2281 vfree(log_buf);
2282 free_env:
2283 if (!env->prog->aux->used_maps)
2284 /* if we didn't copy map pointers into bpf_prog_info, release
2285 * them now. Otherwise free_bpf_prog_info() will release them.
2286 */
2287 release_maps(env);
2288 *prog = env->prog;
2289 kfree(env);
2290 mutex_unlock(&bpf_verifier_lock);
2291 return ret;
2292 }