/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};
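/* As an illustrative expansion (the exact entries come from <linux/bpf_types.h>),
 * the BPF_PROG_TYPE() macro above turns each listed program type into an
 * initializer along the lines of:
 *
 *	[BPF_PROG_TYPE_SOCKET_FILTER] = &sk_filter_verifier_ops,
 */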
/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
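/* As an illustrative sketch (not part of the original file), a BPF-C program
 * that satisfies the reference tracking rules above looks roughly like:
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), netns, 0);
 *	if (sk)				// PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET
 *		bpf_sk_release(sk);	// reference released
 *	return 0;
 *
 * Omitting the bpf_sk_release() call, or dereferencing 'sk' before the NULL
 * check, causes the verifier to reject the program.
 */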
/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};
#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024
#define BPF_COMPLEXITY_LIMIT_STATES	64
#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
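/* aux->map_state packs the struct bpf_map pointer together with the
 * BPF_MAP_PTR_UNPRIV flag in bit 0 (the pointer is aligned, so bit 0 is
 * otherwise unused); the helpers below read and update that encoding.
 */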
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}
struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
};

static DEFINE_MUTEX(bpf_verifier_lock);
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}
/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool reg_type_may_be_null(enum bpf_reg_type type)
{
	return type == PTR_TO_MAP_VALUE_OR_NULL ||
	       type == PTR_TO_SOCKET_OR_NULL;
}

static bool type_is_refcounted(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET;
}

static bool type_is_refcounted_or_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
}

static bool reg_is_refcounted(const struct bpf_reg_state *reg)
{
	return type_is_refcounted(reg->type);
}

static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
{
	return type_is_refcounted_or_null(reg->type);
}

static bool arg_type_is_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCKET;
}

/* Determine whether the function releases some resources allocated by another
 * function call. The first reference type argument will be assumed to be
 * released by release_reference().
 */
static bool is_release_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_release;
}
/* string representation of 'enum bpf_reg_type' */
static const char * const reg_type_str[] = {
	[SCALAR_VALUE]		= "inv",
	[PTR_TO_CTX]		= "ctx",
	[CONST_PTR_TO_MAP]	= "map_ptr",
	[PTR_TO_MAP_VALUE]	= "map_value",
	[PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
	[PTR_TO_STACK]		= "fp",
	[PTR_TO_PACKET]		= "pkt",
	[PTR_TO_PACKET_META]	= "pkt_meta",
	[PTR_TO_PACKET_END]	= "pkt_end",
	[PTR_TO_FLOW_KEYS]	= "flow_keys",
	[PTR_TO_SOCKET]		= "sock",
	[PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
};
static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
};
static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
}
static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}
356 static void print_verifier_state(struct bpf_verifier_env
*env
,
357 const struct bpf_func_state
*state
)
359 const struct bpf_reg_state
*reg
;
364 verbose(env
, " frame%d:", state
->frameno
);
365 for (i
= 0; i
< MAX_BPF_REG
; i
++) {
366 reg
= &state
->regs
[i
];
370 verbose(env
, " R%d", i
);
371 print_liveness(env
, reg
->live
);
372 verbose(env
, "=%s", reg_type_str
[t
]);
373 if ((t
== SCALAR_VALUE
|| t
== PTR_TO_STACK
) &&
374 tnum_is_const(reg
->var_off
)) {
375 /* reg->off should be 0 for SCALAR_VALUE */
376 verbose(env
, "%lld", reg
->var_off
.value
+ reg
->off
);
377 if (t
== PTR_TO_STACK
)
378 verbose(env
, ",call_%d", func(env
, reg
)->callsite
);
380 verbose(env
, "(id=%d", reg
->id
);
381 if (t
!= SCALAR_VALUE
)
382 verbose(env
, ",off=%d", reg
->off
);
383 if (type_is_pkt_pointer(t
))
384 verbose(env
, ",r=%d", reg
->range
);
385 else if (t
== CONST_PTR_TO_MAP
||
386 t
== PTR_TO_MAP_VALUE
||
387 t
== PTR_TO_MAP_VALUE_OR_NULL
)
388 verbose(env
, ",ks=%d,vs=%d",
389 reg
->map_ptr
->key_size
,
390 reg
->map_ptr
->value_size
);
391 if (tnum_is_const(reg
->var_off
)) {
392 /* Typically an immediate SCALAR_VALUE, but
393 * could be a pointer whose offset is too big
396 verbose(env
, ",imm=%llx", reg
->var_off
.value
);
398 if (reg
->smin_value
!= reg
->umin_value
&&
399 reg
->smin_value
!= S64_MIN
)
400 verbose(env
, ",smin_value=%lld",
401 (long long)reg
->smin_value
);
402 if (reg
->smax_value
!= reg
->umax_value
&&
403 reg
->smax_value
!= S64_MAX
)
404 verbose(env
, ",smax_value=%lld",
405 (long long)reg
->smax_value
);
406 if (reg
->umin_value
!= 0)
407 verbose(env
, ",umin_value=%llu",
408 (unsigned long long)reg
->umin_value
);
409 if (reg
->umax_value
!= U64_MAX
)
410 verbose(env
, ",umax_value=%llu",
411 (unsigned long long)reg
->umax_value
);
412 if (!tnum_is_unknown(reg
->var_off
)) {
415 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
416 verbose(env
, ",var_off=%s", tn_buf
);
422 for (i
= 0; i
< state
->allocated_stack
/ BPF_REG_SIZE
; i
++) {
423 char types_buf
[BPF_REG_SIZE
+ 1];
427 for (j
= 0; j
< BPF_REG_SIZE
; j
++) {
428 if (state
->stack
[i
].slot_type
[j
] != STACK_INVALID
)
430 types_buf
[j
] = slot_type_char
[
431 state
->stack
[i
].slot_type
[j
]];
433 types_buf
[BPF_REG_SIZE
] = 0;
436 verbose(env
, " fp%d", (-i
- 1) * BPF_REG_SIZE
);
437 print_liveness(env
, state
->stack
[i
].spilled_ptr
.live
);
438 if (state
->stack
[i
].slot_type
[0] == STACK_SPILL
)
440 reg_type_str
[state
->stack
[i
].spilled_ptr
.type
]);
442 verbose(env
, "=%s", types_buf
);
444 if (state
->acquired_refs
&& state
->refs
[0].id
) {
445 verbose(env
, " refs=%d", state
->refs
[0].id
);
446 for (i
= 1; i
< state
->acquired_refs
; i
++)
447 if (state
->refs
[i
].id
)
448 verbose(env
, ",%d", state
->refs
[i
].id
);
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE)				\
static int copy_##NAME##_state(struct bpf_func_state *dst,		\
			       const struct bpf_func_state *src)	\
{									\
	if (!src->FIELD)						\
		return 0;						\
	if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) {			\
		/* internal bug, make state invalid to reject the program */ \
		memset(dst, 0, sizeof(*dst));				\
		return -EFAULT;						\
	}								\
	memcpy(dst->FIELD, src->FIELD,					\
	       sizeof(*src->FIELD) * (src->COUNT / SIZE));		\
	return 0;							\
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
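/* For instance, the copy_stack_state() instance generated above checks that
 * dst has at least as many stack slots allocated as src and then memcpy()s
 * src->allocated_stack / BPF_REG_SIZE entries of src->stack into dst->stack.
 */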
474 #define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
475 static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
478 u32 old_size = state->COUNT; \
479 struct bpf_##NAME##_state *new_##FIELD; \
480 int slot = size / SIZE; \
482 if (size <= old_size || !size) { \
485 state->COUNT = slot * SIZE; \
486 if (!size && old_size) { \
487 kfree(state->FIELD); \
488 state->FIELD = NULL; \
492 new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
498 memcpy(new_##FIELD, state->FIELD, \
499 sizeof(*new_##FIELD) * (old_size / SIZE)); \
500 memset(new_##FIELD + old_size / SIZE, 0, \
501 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
503 state->COUNT = slot * SIZE; \
504 kfree(state->FIELD); \
505 state->FIELD = new_##FIELD; \
508 /* realloc_reference_state() */
509 REALLOC_STATE_FN(reference
, acquired_refs
, refs
, 1)
510 /* realloc_stack_state() */
511 REALLOC_STATE_FN(stack
, allocated_stack
, stack
, BPF_REG_SIZE
)
512 #undef REALLOC_STATE_FN
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. A stack access from the program
 * makes check_stack_write() call realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which realloc_stack_state() copies over. It points to previous
 * bpf_verifier_state which is never reallocated.
 */
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
			      int refs_size, bool copy_old)
{
	int err = realloc_reference_state(state, refs_size, copy_old);

	if (err)
		return err;
	return realloc_stack_state(state, stack_size, copy_old);
}
530 /* Acquire a pointer id from the env and update the state->refs to include
531 * this new pointer reference.
532 * On success, returns a valid pointer id to associate with the register
533 * On failure, returns a negative errno.
535 static int acquire_reference_state(struct bpf_verifier_env
*env
, int insn_idx
)
537 struct bpf_func_state
*state
= cur_func(env
);
538 int new_ofs
= state
->acquired_refs
;
541 err
= realloc_reference_state(state
, state
->acquired_refs
+ 1, true);
545 state
->refs
[new_ofs
].id
= id
;
546 state
->refs
[new_ofs
].insn_idx
= insn_idx
;
551 /* release function corresponding to acquire_reference_state(). Idempotent. */
552 static int __release_reference_state(struct bpf_func_state
*state
, int ptr_id
)
559 last_idx
= state
->acquired_refs
- 1;
560 for (i
= 0; i
< state
->acquired_refs
; i
++) {
561 if (state
->refs
[i
].id
== ptr_id
) {
562 if (last_idx
&& i
!= last_idx
)
563 memcpy(&state
->refs
[i
], &state
->refs
[last_idx
],
564 sizeof(*state
->refs
));
565 memset(&state
->refs
[last_idx
], 0, sizeof(*state
->refs
));
566 state
->acquired_refs
--;
573 /* variation on the above for cases where we expect that there must be an
574 * outstanding reference for the specified ptr_id.
576 static int release_reference_state(struct bpf_verifier_env
*env
, int ptr_id
)
578 struct bpf_func_state
*state
= cur_func(env
);
581 err
= __release_reference_state(state
, ptr_id
);
582 if (WARN_ON_ONCE(err
!= 0))
583 verbose(env
, "verifier internal error: can't release reference\n");
587 static int transfer_reference_state(struct bpf_func_state
*dst
,
588 struct bpf_func_state
*src
)
590 int err
= realloc_reference_state(dst
, src
->acquired_refs
, false);
593 err
= copy_reference_state(dst
, src
);
599 static void free_func_state(struct bpf_func_state
*state
)
608 static void free_verifier_state(struct bpf_verifier_state
*state
,
613 for (i
= 0; i
<= state
->curframe
; i
++) {
614 free_func_state(state
->frame
[i
]);
615 state
->frame
[i
] = NULL
;
621 /* copy verifier state from src to dst growing dst stack space
622 * when necessary to accommodate larger src stack
624 static int copy_func_state(struct bpf_func_state
*dst
,
625 const struct bpf_func_state
*src
)
629 err
= realloc_func_state(dst
, src
->allocated_stack
, src
->acquired_refs
,
633 memcpy(dst
, src
, offsetof(struct bpf_func_state
, acquired_refs
));
634 err
= copy_reference_state(dst
, src
);
637 return copy_stack_state(dst
, src
);
640 static int copy_verifier_state(struct bpf_verifier_state
*dst_state
,
641 const struct bpf_verifier_state
*src
)
643 struct bpf_func_state
*dst
;
	/* if dst has more stack frames than src, free them */
647 for (i
= src
->curframe
+ 1; i
<= dst_state
->curframe
; i
++) {
648 free_func_state(dst_state
->frame
[i
]);
649 dst_state
->frame
[i
] = NULL
;
651 dst_state
->curframe
= src
->curframe
;
652 for (i
= 0; i
<= src
->curframe
; i
++) {
653 dst
= dst_state
->frame
[i
];
655 dst
= kzalloc(sizeof(*dst
), GFP_KERNEL
);
658 dst_state
->frame
[i
] = dst
;
660 err
= copy_func_state(dst
, src
->frame
[i
]);
667 static int pop_stack(struct bpf_verifier_env
*env
, int *prev_insn_idx
,
670 struct bpf_verifier_state
*cur
= env
->cur_state
;
671 struct bpf_verifier_stack_elem
*elem
, *head
= env
->head
;
674 if (env
->head
== NULL
)
678 err
= copy_verifier_state(cur
, &head
->st
);
683 *insn_idx
= head
->insn_idx
;
685 *prev_insn_idx
= head
->prev_insn_idx
;
687 free_verifier_state(&head
->st
, false);
694 static struct bpf_verifier_state
*push_stack(struct bpf_verifier_env
*env
,
695 int insn_idx
, int prev_insn_idx
)
697 struct bpf_verifier_state
*cur
= env
->cur_state
;
698 struct bpf_verifier_stack_elem
*elem
;
701 elem
= kzalloc(sizeof(struct bpf_verifier_stack_elem
), GFP_KERNEL
);
705 elem
->insn_idx
= insn_idx
;
706 elem
->prev_insn_idx
= prev_insn_idx
;
707 elem
->next
= env
->head
;
710 err
= copy_verifier_state(&elem
->st
, cur
);
713 if (env
->stack_size
> BPF_COMPLEXITY_LIMIT_STACK
) {
714 verbose(env
, "BPF program is too complex\n");
719 free_verifier_state(env
->cur_state
, true);
720 env
->cur_state
= NULL
;
721 /* pop all elements and return */
722 while (!pop_stack(env
, NULL
, NULL
));
726 #define CALLER_SAVED_REGS 6
727 static const int caller_saved
[CALLER_SAVED_REGS
] = {
728 BPF_REG_0
, BPF_REG_1
, BPF_REG_2
, BPF_REG_3
, BPF_REG_4
, BPF_REG_5
731 static void __mark_reg_not_init(struct bpf_reg_state
*reg
);
733 /* Mark the unknown part of a register (variable offset or scalar value) as
734 * known to have the value @imm.
736 static void __mark_reg_known(struct bpf_reg_state
*reg
, u64 imm
)
738 /* Clear id, off, and union(map_ptr, range) */
739 memset(((u8
*)reg
) + sizeof(reg
->type
), 0,
740 offsetof(struct bpf_reg_state
, var_off
) - sizeof(reg
->type
));
741 reg
->var_off
= tnum_const(imm
);
742 reg
->smin_value
= (s64
)imm
;
743 reg
->smax_value
= (s64
)imm
;
744 reg
->umin_value
= imm
;
745 reg
->umax_value
= imm
;
748 /* Mark the 'variable offset' part of a register as zero. This should be
749 * used only on registers holding a pointer type.
751 static void __mark_reg_known_zero(struct bpf_reg_state
*reg
)
753 __mark_reg_known(reg
, 0);
756 static void __mark_reg_const_zero(struct bpf_reg_state
*reg
)
758 __mark_reg_known(reg
, 0);
759 reg
->type
= SCALAR_VALUE
;
762 static void mark_reg_known_zero(struct bpf_verifier_env
*env
,
763 struct bpf_reg_state
*regs
, u32 regno
)
765 if (WARN_ON(regno
>= MAX_BPF_REG
)) {
766 verbose(env
, "mark_reg_known_zero(regs, %u)\n", regno
);
767 /* Something bad happened, let's kill all regs */
768 for (regno
= 0; regno
< MAX_BPF_REG
; regno
++)
769 __mark_reg_not_init(regs
+ regno
);
772 __mark_reg_known_zero(regs
+ regno
);
775 static bool reg_is_pkt_pointer(const struct bpf_reg_state
*reg
)
777 return type_is_pkt_pointer(reg
->type
);
780 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state
*reg
)
782 return reg_is_pkt_pointer(reg
) ||
783 reg
->type
== PTR_TO_PACKET_END
;
786 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
787 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state
*reg
,
788 enum bpf_reg_type which
)
790 /* The register can already have a range from prior markings.
791 * This is fine as long as it hasn't been advanced from its
794 return reg
->type
== which
&&
797 tnum_equals_const(reg
->var_off
, 0);
800 /* Attempts to improve min/max values based on var_off information */
801 static void __update_reg_bounds(struct bpf_reg_state
*reg
)
803 /* min signed is max(sign bit) | min(other bits) */
804 reg
->smin_value
= max_t(s64
, reg
->smin_value
,
805 reg
->var_off
.value
| (reg
->var_off
.mask
& S64_MIN
));
806 /* max signed is min(sign bit) | max(other bits) */
807 reg
->smax_value
= min_t(s64
, reg
->smax_value
,
808 reg
->var_off
.value
| (reg
->var_off
.mask
& S64_MAX
));
809 reg
->umin_value
= max(reg
->umin_value
, reg
->var_off
.value
);
810 reg
->umax_value
= min(reg
->umax_value
,
811 reg
->var_off
.value
| reg
->var_off
.mask
);
814 /* Uses signed min/max values to inform unsigned, and vice-versa */
815 static void __reg_deduce_bounds(struct bpf_reg_state
*reg
)
817 /* Learn sign from signed bounds.
818 * If we cannot cross the sign boundary, then signed and unsigned bounds
819 * are the same, so combine. This works even in the negative case, e.g.
820 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
822 if (reg
->smin_value
>= 0 || reg
->smax_value
< 0) {
823 reg
->smin_value
= reg
->umin_value
= max_t(u64
, reg
->smin_value
,
825 reg
->smax_value
= reg
->umax_value
= min_t(u64
, reg
->smax_value
,
829 /* Learn sign from unsigned bounds. Signed bounds cross the sign
830 * boundary, so we must be careful.
832 if ((s64
)reg
->umax_value
>= 0) {
833 /* Positive. We can't learn anything from the smin, but smax
834 * is positive, hence safe.
836 reg
->smin_value
= reg
->umin_value
;
837 reg
->smax_value
= reg
->umax_value
= min_t(u64
, reg
->smax_value
,
839 } else if ((s64
)reg
->umin_value
< 0) {
840 /* Negative. We can't learn anything from the smax, but smin
841 * is negative, hence safe.
843 reg
->smin_value
= reg
->umin_value
= max_t(u64
, reg
->smin_value
,
845 reg
->smax_value
= reg
->umax_value
;
849 /* Attempts to improve var_off based on unsigned min/max information */
850 static void __reg_bound_offset(struct bpf_reg_state
*reg
)
852 reg
->var_off
= tnum_intersect(reg
->var_off
,
853 tnum_range(reg
->umin_value
,
857 /* Reset the min/max bounds of a register */
858 static void __mark_reg_unbounded(struct bpf_reg_state
*reg
)
860 reg
->smin_value
= S64_MIN
;
861 reg
->smax_value
= S64_MAX
;
863 reg
->umax_value
= U64_MAX
;
866 /* Mark a register as having a completely unknown (scalar) value. */
867 static void __mark_reg_unknown(struct bpf_reg_state
*reg
)
870 * Clear type, id, off, and union(map_ptr, range) and
871 * padding between 'type' and union
873 memset(reg
, 0, offsetof(struct bpf_reg_state
, var_off
));
874 reg
->type
= SCALAR_VALUE
;
875 reg
->var_off
= tnum_unknown
;
877 __mark_reg_unbounded(reg
);
880 static void mark_reg_unknown(struct bpf_verifier_env
*env
,
881 struct bpf_reg_state
*regs
, u32 regno
)
883 if (WARN_ON(regno
>= MAX_BPF_REG
)) {
884 verbose(env
, "mark_reg_unknown(regs, %u)\n", regno
);
885 /* Something bad happened, let's kill all regs except FP */
886 for (regno
= 0; regno
< BPF_REG_FP
; regno
++)
887 __mark_reg_not_init(regs
+ regno
);
890 __mark_reg_unknown(regs
+ regno
);
893 static void __mark_reg_not_init(struct bpf_reg_state
*reg
)
895 __mark_reg_unknown(reg
);
896 reg
->type
= NOT_INIT
;
899 static void mark_reg_not_init(struct bpf_verifier_env
*env
,
900 struct bpf_reg_state
*regs
, u32 regno
)
902 if (WARN_ON(regno
>= MAX_BPF_REG
)) {
903 verbose(env
, "mark_reg_not_init(regs, %u)\n", regno
);
904 /* Something bad happened, let's kill all regs except FP */
905 for (regno
= 0; regno
< BPF_REG_FP
; regno
++)
906 __mark_reg_not_init(regs
+ regno
);
909 __mark_reg_not_init(regs
+ regno
);
912 static void init_reg_state(struct bpf_verifier_env
*env
,
913 struct bpf_func_state
*state
)
915 struct bpf_reg_state
*regs
= state
->regs
;
918 for (i
= 0; i
< MAX_BPF_REG
; i
++) {
919 mark_reg_not_init(env
, regs
, i
);
920 regs
[i
].live
= REG_LIVE_NONE
;
921 regs
[i
].parent
= NULL
;
925 regs
[BPF_REG_FP
].type
= PTR_TO_STACK
;
926 mark_reg_known_zero(env
, regs
, BPF_REG_FP
);
927 regs
[BPF_REG_FP
].frameno
= state
->frameno
;
929 /* 1st arg to a function */
930 regs
[BPF_REG_1
].type
= PTR_TO_CTX
;
931 mark_reg_known_zero(env
, regs
, BPF_REG_1
);
934 #define BPF_MAIN_FUNC (-1)
935 static void init_func_state(struct bpf_verifier_env
*env
,
936 struct bpf_func_state
*state
,
937 int callsite
, int frameno
, int subprogno
)
939 state
->callsite
= callsite
;
940 state
->frameno
= frameno
;
941 state
->subprogno
= subprogno
;
942 init_reg_state(env
, state
);
946 SRC_OP
, /* register is used as source operand */
947 DST_OP
, /* register is used as destination operand */
948 DST_OP_NO_MARK
/* same as above, check only, don't mark */
951 static int cmp_subprogs(const void *a
, const void *b
)
953 return ((struct bpf_subprog_info
*)a
)->start
-
954 ((struct bpf_subprog_info
*)b
)->start
;
957 static int find_subprog(struct bpf_verifier_env
*env
, int off
)
959 struct bpf_subprog_info
*p
;
961 p
= bsearch(&off
, env
->subprog_info
, env
->subprog_cnt
,
962 sizeof(env
->subprog_info
[0]), cmp_subprogs
);
965 return p
- env
->subprog_info
;
969 static int add_subprog(struct bpf_verifier_env
*env
, int off
)
971 int insn_cnt
= env
->prog
->len
;
974 if (off
>= insn_cnt
|| off
< 0) {
975 verbose(env
, "call to invalid destination\n");
978 ret
= find_subprog(env
, off
);
981 if (env
->subprog_cnt
>= BPF_MAX_SUBPROGS
) {
982 verbose(env
, "too many subprograms\n");
985 env
->subprog_info
[env
->subprog_cnt
++].start
= off
;
986 sort(env
->subprog_info
, env
->subprog_cnt
,
987 sizeof(env
->subprog_info
[0]), cmp_subprogs
, NULL
);
991 static int check_subprogs(struct bpf_verifier_env
*env
)
993 int i
, ret
, subprog_start
, subprog_end
, off
, cur_subprog
= 0;
994 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
995 struct bpf_insn
*insn
= env
->prog
->insnsi
;
996 int insn_cnt
= env
->prog
->len
;
998 /* Add entry function. */
999 ret
= add_subprog(env
, 0);
1003 /* determine subprog starts. The end is one before the next starts */
1004 for (i
= 0; i
< insn_cnt
; i
++) {
1005 if (insn
[i
].code
!= (BPF_JMP
| BPF_CALL
))
1007 if (insn
[i
].src_reg
!= BPF_PSEUDO_CALL
)
1009 if (!env
->allow_ptr_leaks
) {
1010 verbose(env
, "function calls to other bpf functions are allowed for root only\n");
1013 ret
= add_subprog(env
, i
+ insn
[i
].imm
+ 1);
1018 /* Add a fake 'exit' subprog which could simplify subprog iteration
1019 * logic. 'subprog_cnt' should not be increased.
1021 subprog
[env
->subprog_cnt
].start
= insn_cnt
;
1023 if (env
->log
.level
> 1)
1024 for (i
= 0; i
< env
->subprog_cnt
; i
++)
1025 verbose(env
, "func#%d @%d\n", i
, subprog
[i
].start
);
1027 /* now check that all jumps are within the same subprog */
1028 subprog_start
= subprog
[cur_subprog
].start
;
1029 subprog_end
= subprog
[cur_subprog
+ 1].start
;
1030 for (i
= 0; i
< insn_cnt
; i
++) {
1031 u8 code
= insn
[i
].code
;
1033 if (BPF_CLASS(code
) != BPF_JMP
)
1035 if (BPF_OP(code
) == BPF_EXIT
|| BPF_OP(code
) == BPF_CALL
)
1037 off
= i
+ insn
[i
].off
+ 1;
1038 if (off
< subprog_start
|| off
>= subprog_end
) {
1039 verbose(env
, "jump out of range from insn %d to %d\n", i
, off
);
1043 if (i
== subprog_end
- 1) {
1044 /* to avoid fall-through from one subprog into another
1045 * the last insn of the subprog should be either exit
1046 * or unconditional jump back
1048 if (code
!= (BPF_JMP
| BPF_EXIT
) &&
1049 code
!= (BPF_JMP
| BPF_JA
)) {
1050 verbose(env
, "last insn is not an exit or jmp\n");
1053 subprog_start
= subprog_end
;
1055 if (cur_subprog
< env
->subprog_cnt
)
1056 subprog_end
= subprog
[cur_subprog
+ 1].start
;
1062 /* Parentage chain of this register (or stack slot) should take care of all
1063 * issues like callee-saved registers, stack slot allocation time, etc.
1065 static int mark_reg_read(struct bpf_verifier_env
*env
,
1066 const struct bpf_reg_state
*state
,
1067 struct bpf_reg_state
*parent
)
1069 bool writes
= parent
== state
->parent
; /* Observe write marks */
1072 /* if read wasn't screened by an earlier write ... */
1073 if (writes
&& state
->live
& REG_LIVE_WRITTEN
)
1075 /* ... then we depend on parent's value */
1076 parent
->live
|= REG_LIVE_READ
;
1078 parent
= state
->parent
;
1084 static int check_reg_arg(struct bpf_verifier_env
*env
, u32 regno
,
1085 enum reg_arg_type t
)
1087 struct bpf_verifier_state
*vstate
= env
->cur_state
;
1088 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
1089 struct bpf_reg_state
*regs
= state
->regs
;
1091 if (regno
>= MAX_BPF_REG
) {
1092 verbose(env
, "R%d is invalid\n", regno
);
1097 /* check whether register used as source operand can be read */
1098 if (regs
[regno
].type
== NOT_INIT
) {
1099 verbose(env
, "R%d !read_ok\n", regno
);
1102 /* We don't need to worry about FP liveness because it's read-only */
1103 if (regno
!= BPF_REG_FP
)
1104 return mark_reg_read(env
, ®s
[regno
],
1105 regs
[regno
].parent
);
1107 /* check whether register used as dest operand can be written to */
1108 if (regno
== BPF_REG_FP
) {
1109 verbose(env
, "frame pointer is read only\n");
1112 regs
[regno
].live
|= REG_LIVE_WRITTEN
;
1114 mark_reg_unknown(env
, regs
, regno
);
1119 static bool is_spillable_regtype(enum bpf_reg_type type
)
1122 case PTR_TO_MAP_VALUE
:
1123 case PTR_TO_MAP_VALUE_OR_NULL
:
1127 case PTR_TO_PACKET_META
:
1128 case PTR_TO_PACKET_END
:
1129 case PTR_TO_FLOW_KEYS
:
1130 case CONST_PTR_TO_MAP
:
1132 case PTR_TO_SOCKET_OR_NULL
:
1139 /* Does this register contain a constant zero? */
1140 static bool register_is_null(struct bpf_reg_state
*reg
)
1142 return reg
->type
== SCALAR_VALUE
&& tnum_equals_const(reg
->var_off
, 0);
/* check_stack_read/write functions track spill/fill of registers;
 * stack boundary and alignment are checked in check_mem_access()
 */
1148 static int check_stack_write(struct bpf_verifier_env
*env
,
1149 struct bpf_func_state
*state
, /* func where register points to */
1150 int off
, int size
, int value_regno
, int insn_idx
)
1152 struct bpf_func_state
*cur
; /* state of the current function */
1153 int i
, slot
= -off
- 1, spi
= slot
/ BPF_REG_SIZE
, err
;
1154 enum bpf_reg_type type
;
1156 err
= realloc_func_state(state
, round_up(slot
+ 1, BPF_REG_SIZE
),
1157 state
->acquired_refs
, true);
1160 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1161 * so it's aligned access and [off, off + size) are within stack limits
1163 if (!env
->allow_ptr_leaks
&&
1164 state
->stack
[spi
].slot_type
[0] == STACK_SPILL
&&
1165 size
!= BPF_REG_SIZE
) {
1166 verbose(env
, "attempt to corrupt spilled pointer on stack\n");
1170 cur
= env
->cur_state
->frame
[env
->cur_state
->curframe
];
1171 if (value_regno
>= 0 &&
1172 is_spillable_regtype((type
= cur
->regs
[value_regno
].type
))) {
1174 /* register containing pointer is being spilled into stack */
1175 if (size
!= BPF_REG_SIZE
) {
1176 verbose(env
, "invalid size of register spill\n");
1180 if (state
!= cur
&& type
== PTR_TO_STACK
) {
1181 verbose(env
, "cannot spill pointers to stack into stack frame of the caller\n");
1185 /* save register state */
1186 state
->stack
[spi
].spilled_ptr
= cur
->regs
[value_regno
];
1187 state
->stack
[spi
].spilled_ptr
.live
|= REG_LIVE_WRITTEN
;
1189 for (i
= 0; i
< BPF_REG_SIZE
; i
++) {
1190 if (state
->stack
[spi
].slot_type
[i
] == STACK_MISC
&&
1191 !env
->allow_ptr_leaks
) {
1192 int *poff
= &env
->insn_aux_data
[insn_idx
].sanitize_stack_off
;
1193 int soff
= (-spi
- 1) * BPF_REG_SIZE
;
				/* detected reuse of integer stack slot with a pointer
				 * which means either llvm is reusing stack slot or
				 * an attacker is trying to exploit CVE-2018-3639
				 * (speculative store bypass)
				 * Have to sanitize that slot with preemptive
				 * store of zero.
				 */
1202 if (*poff
&& *poff
!= soff
) {
1203 /* disallow programs where single insn stores
1204 * into two different stack slots, since verifier
1205 * cannot sanitize them
1208 "insn %d cannot access two stack slots fp%d and fp%d",
1209 insn_idx
, *poff
, soff
);
1214 state
->stack
[spi
].slot_type
[i
] = STACK_SPILL
;
1217 u8 type
= STACK_MISC
;
1219 /* regular write of data into stack destroys any spilled ptr */
1220 state
->stack
[spi
].spilled_ptr
.type
= NOT_INIT
;
		/* only mark the slot as written if all 8 bytes were written
		 * otherwise read propagation may incorrectly stop too soon
		 * when stack slots are partially written.
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to first state when a program
		 * writes+reads less than 8 bytes
		 */
1230 if (size
== BPF_REG_SIZE
)
1231 state
->stack
[spi
].spilled_ptr
.live
|= REG_LIVE_WRITTEN
;
1233 /* when we zero initialize stack slots mark them as such */
1234 if (value_regno
>= 0 &&
1235 register_is_null(&cur
->regs
[value_regno
]))
1238 for (i
= 0; i
< size
; i
++)
1239 state
->stack
[spi
].slot_type
[(slot
- i
) % BPF_REG_SIZE
] =
1245 static int check_stack_read(struct bpf_verifier_env
*env
,
1246 struct bpf_func_state
*reg_state
/* func where register points to */,
1247 int off
, int size
, int value_regno
)
1249 struct bpf_verifier_state
*vstate
= env
->cur_state
;
1250 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
1251 int i
, slot
= -off
- 1, spi
= slot
/ BPF_REG_SIZE
;
1254 if (reg_state
->allocated_stack
<= slot
) {
1255 verbose(env
, "invalid read from stack off %d+0 size %d\n",
1259 stype
= reg_state
->stack
[spi
].slot_type
;
1261 if (stype
[0] == STACK_SPILL
) {
1262 if (size
!= BPF_REG_SIZE
) {
1263 verbose(env
, "invalid size of register spill\n");
1266 for (i
= 1; i
< BPF_REG_SIZE
; i
++) {
1267 if (stype
[(slot
- i
) % BPF_REG_SIZE
] != STACK_SPILL
) {
1268 verbose(env
, "corrupted spill memory\n");
1273 if (value_regno
>= 0) {
1274 /* restore register state from stack */
1275 state
->regs
[value_regno
] = reg_state
->stack
[spi
].spilled_ptr
;
1276 /* mark reg as written since spilled pointer state likely
1277 * has its liveness marks cleared by is_state_visited()
1278 * which resets stack/reg liveness for state transitions
1280 state
->regs
[value_regno
].live
|= REG_LIVE_WRITTEN
;
1282 mark_reg_read(env
, ®_state
->stack
[spi
].spilled_ptr
,
1283 reg_state
->stack
[spi
].spilled_ptr
.parent
);
1288 for (i
= 0; i
< size
; i
++) {
1289 if (stype
[(slot
- i
) % BPF_REG_SIZE
] == STACK_MISC
)
1291 if (stype
[(slot
- i
) % BPF_REG_SIZE
] == STACK_ZERO
) {
1295 verbose(env
, "invalid read from stack off %d+%d size %d\n",
1299 mark_reg_read(env
, ®_state
->stack
[spi
].spilled_ptr
,
1300 reg_state
->stack
[spi
].spilled_ptr
.parent
);
1301 if (value_regno
>= 0) {
1302 if (zeros
== size
) {
1303 /* any size read into register is zero extended,
1304 * so the whole register == const_zero
1306 __mark_reg_const_zero(&state
->regs
[value_regno
]);
1308 /* have read misc data from the stack */
1309 mark_reg_unknown(env
, state
->regs
, value_regno
);
1311 state
->regs
[value_regno
].live
|= REG_LIVE_WRITTEN
;
1317 /* check read/write into map element returned by bpf_map_lookup_elem() */
1318 static int __check_map_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
1319 int size
, bool zero_size_allowed
)
1321 struct bpf_reg_state
*regs
= cur_regs(env
);
1322 struct bpf_map
*map
= regs
[regno
].map_ptr
;
1324 if (off
< 0 || size
< 0 || (size
== 0 && !zero_size_allowed
) ||
1325 off
+ size
> map
->value_size
) {
1326 verbose(env
, "invalid access to map value, value_size=%d off=%d size=%d\n",
1327 map
->value_size
, off
, size
);
1333 /* check read/write into a map element with possible variable offset */
1334 static int check_map_access(struct bpf_verifier_env
*env
, u32 regno
,
1335 int off
, int size
, bool zero_size_allowed
)
1337 struct bpf_verifier_state
*vstate
= env
->cur_state
;
1338 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
1339 struct bpf_reg_state
*reg
= &state
->regs
[regno
];
1342 /* We may have adjusted the register to this map value, so we
1343 * need to try adding each of min_value and max_value to off
1344 * to make sure our theoretical access will be safe.
1347 print_verifier_state(env
, state
);
	/* The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
	 */
1354 if (reg
->smin_value
< 0) {
1355 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1359 err
= __check_map_access(env
, regno
, reg
->smin_value
+ off
, size
,
1362 verbose(env
, "R%d min value is outside of the array range\n",
1367 /* If we haven't set a max value then we need to bail since we can't be
1368 * sure we won't do bad things.
1369 * If reg->umax_value + off could overflow, treat that as unbounded too.
1371 if (reg
->umax_value
>= BPF_MAX_VAR_OFF
) {
1372 verbose(env
, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
1376 err
= __check_map_access(env
, regno
, reg
->umax_value
+ off
, size
,
1379 verbose(env
, "R%d max value is outside of the array range\n",
1384 #define MAX_PACKET_OFF 0xffff
1386 static bool may_access_direct_pkt_data(struct bpf_verifier_env
*env
,
1387 const struct bpf_call_arg_meta
*meta
,
1388 enum bpf_access_type t
)
1390 switch (env
->prog
->type
) {
1391 /* Program types only with direct read access go here! */
1392 case BPF_PROG_TYPE_LWT_IN
:
1393 case BPF_PROG_TYPE_LWT_OUT
:
1394 case BPF_PROG_TYPE_LWT_SEG6LOCAL
:
1395 case BPF_PROG_TYPE_SK_REUSEPORT
:
1396 case BPF_PROG_TYPE_FLOW_DISSECTOR
:
1397 case BPF_PROG_TYPE_CGROUP_SKB
:
1402 /* Program types with direct read + write access go here! */
1403 case BPF_PROG_TYPE_SCHED_CLS
:
1404 case BPF_PROG_TYPE_SCHED_ACT
:
1405 case BPF_PROG_TYPE_XDP
:
1406 case BPF_PROG_TYPE_LWT_XMIT
:
1407 case BPF_PROG_TYPE_SK_SKB
:
1408 case BPF_PROG_TYPE_SK_MSG
:
1410 return meta
->pkt_access
;
1412 env
->seen_direct_write
= true;
1419 static int __check_packet_access(struct bpf_verifier_env
*env
, u32 regno
,
1420 int off
, int size
, bool zero_size_allowed
)
1422 struct bpf_reg_state
*regs
= cur_regs(env
);
1423 struct bpf_reg_state
*reg
= ®s
[regno
];
1425 if (off
< 0 || size
< 0 || (size
== 0 && !zero_size_allowed
) ||
1426 (u64
)off
+ size
> reg
->range
) {
1427 verbose(env
, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
1428 off
, size
, regno
, reg
->id
, reg
->off
, reg
->range
);
1434 static int check_packet_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
1435 int size
, bool zero_size_allowed
)
1437 struct bpf_reg_state
*regs
= cur_regs(env
);
1438 struct bpf_reg_state
*reg
= ®s
[regno
];
1441 /* We may have added a variable offset to the packet pointer; but any
1442 * reg->range we have comes after that. We are only checking the fixed
1446 /* We don't allow negative numbers, because we aren't tracking enough
1447 * detail to prove they're safe.
1449 if (reg
->smin_value
< 0) {
1450 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1454 err
= __check_packet_access(env
, regno
, off
, size
, zero_size_allowed
);
1456 verbose(env
, "R%d offset is outside of the packet\n", regno
);
1462 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
1463 static int check_ctx_access(struct bpf_verifier_env
*env
, int insn_idx
, int off
, int size
,
1464 enum bpf_access_type t
, enum bpf_reg_type
*reg_type
)
1466 struct bpf_insn_access_aux info
= {
1467 .reg_type
= *reg_type
,
1470 if (env
->ops
->is_valid_access
&&
1471 env
->ops
->is_valid_access(off
, size
, t
, env
->prog
, &info
)) {
1472 /* A non zero info.ctx_field_size indicates that this field is a
1473 * candidate for later verifier transformation to load the whole
1474 * field and then apply a mask when accessed with a narrower
1475 * access than actual ctx access size. A zero info.ctx_field_size
1476 * will only allow for whole field access and rejects any other
1477 * type of narrower access.
1479 *reg_type
= info
.reg_type
;
1481 env
->insn_aux_data
[insn_idx
].ctx_field_size
= info
.ctx_field_size
;
1482 /* remember the offset of last byte accessed in ctx */
1483 if (env
->prog
->aux
->max_ctx_offset
< off
+ size
)
1484 env
->prog
->aux
->max_ctx_offset
= off
+ size
;
1488 verbose(env
, "invalid bpf_context access off=%d size=%d\n", off
, size
);
1492 static int check_flow_keys_access(struct bpf_verifier_env
*env
, int off
,
1495 if (size
< 0 || off
< 0 ||
1496 (u64
)off
+ size
> sizeof(struct bpf_flow_keys
)) {
1497 verbose(env
, "invalid access to flow keys off=%d size=%d\n",
1504 static int check_sock_access(struct bpf_verifier_env
*env
, u32 regno
, int off
,
1505 int size
, enum bpf_access_type t
)
1507 struct bpf_reg_state
*regs
= cur_regs(env
);
1508 struct bpf_reg_state
*reg
= ®s
[regno
];
1509 struct bpf_insn_access_aux info
;
1511 if (reg
->smin_value
< 0) {
1512 verbose(env
, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
1517 if (!bpf_sock_is_valid_access(off
, size
, t
, &info
)) {
1518 verbose(env
, "invalid bpf_sock access off=%d size=%d\n",
1526 static bool __is_pointer_value(bool allow_ptr_leaks
,
1527 const struct bpf_reg_state
*reg
)
1529 if (allow_ptr_leaks
)
1532 return reg
->type
!= SCALAR_VALUE
;
1535 static struct bpf_reg_state
*reg_state(struct bpf_verifier_env
*env
, int regno
)
1537 return cur_regs(env
) + regno
;
1540 static bool is_pointer_value(struct bpf_verifier_env
*env
, int regno
)
1542 return __is_pointer_value(env
->allow_ptr_leaks
, reg_state(env
, regno
));
1545 static bool is_ctx_reg(struct bpf_verifier_env
*env
, int regno
)
1547 const struct bpf_reg_state
*reg
= reg_state(env
, regno
);
1549 return reg
->type
== PTR_TO_CTX
||
1550 reg
->type
== PTR_TO_SOCKET
;
1553 static bool is_pkt_reg(struct bpf_verifier_env
*env
, int regno
)
1555 const struct bpf_reg_state
*reg
= reg_state(env
, regno
);
1557 return type_is_pkt_pointer(reg
->type
);
1560 static bool is_flow_key_reg(struct bpf_verifier_env
*env
, int regno
)
1562 const struct bpf_reg_state
*reg
= reg_state(env
, regno
);
1564 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
1565 return reg
->type
== PTR_TO_FLOW_KEYS
;
1568 static int check_pkt_ptr_alignment(struct bpf_verifier_env
*env
,
1569 const struct bpf_reg_state
*reg
,
1570 int off
, int size
, bool strict
)
1572 struct tnum reg_off
;
1575 /* Byte size accesses are always allowed. */
1576 if (!strict
|| size
== 1)
1579 /* For platforms that do not have a Kconfig enabling
1580 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
1581 * NET_IP_ALIGN is universally set to '2'. And on platforms
1582 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
1583 * to this code only in strict mode where we want to emulate
1584 * the NET_IP_ALIGN==2 checking. Therefore use an
1585 * unconditional IP align value of '2'.
1589 reg_off
= tnum_add(reg
->var_off
, tnum_const(ip_align
+ reg
->off
+ off
));
1590 if (!tnum_is_aligned(reg_off
, size
)) {
1593 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1595 "misaligned packet access off %d+%s+%d+%d size %d\n",
1596 ip_align
, tn_buf
, reg
->off
, off
, size
);
1603 static int check_generic_ptr_alignment(struct bpf_verifier_env
*env
,
1604 const struct bpf_reg_state
*reg
,
1605 const char *pointer_desc
,
1606 int off
, int size
, bool strict
)
1608 struct tnum reg_off
;
1610 /* Byte size accesses are always allowed. */
1611 if (!strict
|| size
== 1)
1614 reg_off
= tnum_add(reg
->var_off
, tnum_const(reg
->off
+ off
));
1615 if (!tnum_is_aligned(reg_off
, size
)) {
1618 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1619 verbose(env
, "misaligned %saccess off %s+%d+%d size %d\n",
1620 pointer_desc
, tn_buf
, reg
->off
, off
, size
);
1627 static int check_ptr_alignment(struct bpf_verifier_env
*env
,
1628 const struct bpf_reg_state
*reg
, int off
,
1629 int size
, bool strict_alignment_once
)
1631 bool strict
= env
->strict_alignment
|| strict_alignment_once
;
1632 const char *pointer_desc
= "";
1634 switch (reg
->type
) {
1636 case PTR_TO_PACKET_META
:
1637 /* Special case, because of NET_IP_ALIGN. Given metadata sits
1638 * right in front, treat it the very same way.
1640 return check_pkt_ptr_alignment(env
, reg
, off
, size
, strict
);
1641 case PTR_TO_FLOW_KEYS
:
1642 pointer_desc
= "flow keys ";
1644 case PTR_TO_MAP_VALUE
:
1645 pointer_desc
= "value ";
1648 pointer_desc
= "context ";
1651 pointer_desc
= "stack ";
1652 /* The stack spill tracking logic in check_stack_write()
1653 * and check_stack_read() relies on stack accesses being
1659 pointer_desc
= "sock ";
1664 return check_generic_ptr_alignment(env
, reg
, pointer_desc
, off
, size
,
1668 static int update_stack_depth(struct bpf_verifier_env
*env
,
1669 const struct bpf_func_state
*func
,
1672 u16 stack
= env
->subprog_info
[func
->subprogno
].stack_depth
;
1677 /* update known max for given subprogram */
1678 env
->subprog_info
[func
->subprogno
].stack_depth
= -off
;
1682 /* starting from main bpf function walk all instructions of the function
1683 * and recursively walk all callees that given function can call.
1684 * Ignore jump and exit insns.
1685 * Since recursion is prevented by check_cfg() this algorithm
1686 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
1688 static int check_max_stack_depth(struct bpf_verifier_env
*env
)
1690 int depth
= 0, frame
= 0, idx
= 0, i
= 0, subprog_end
;
1691 struct bpf_subprog_info
*subprog
= env
->subprog_info
;
1692 struct bpf_insn
*insn
= env
->prog
->insnsi
;
1693 int ret_insn
[MAX_CALL_FRAMES
];
1694 int ret_prog
[MAX_CALL_FRAMES
];
1697 /* round up to 32-bytes, since this is granularity
1698 * of interpreter stack size
1700 depth
+= round_up(max_t(u32
, subprog
[idx
].stack_depth
, 1), 32);
1701 if (depth
> MAX_BPF_STACK
) {
1702 verbose(env
, "combined stack size of %d calls is %d. Too large\n",
1707 subprog_end
= subprog
[idx
+ 1].start
;
1708 for (; i
< subprog_end
; i
++) {
1709 if (insn
[i
].code
!= (BPF_JMP
| BPF_CALL
))
1711 if (insn
[i
].src_reg
!= BPF_PSEUDO_CALL
)
1713 /* remember insn and function to return to */
1714 ret_insn
[frame
] = i
+ 1;
1715 ret_prog
[frame
] = idx
;
1717 /* find the callee */
1718 i
= i
+ insn
[i
].imm
+ 1;
1719 idx
= find_subprog(env
, i
);
1721 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1726 if (frame
>= MAX_CALL_FRAMES
) {
1727 WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
1732 /* end of for() loop means the last insn of the 'subprog'
1733 * was reached. Doesn't matter whether it was JA or EXIT
1737 depth
-= round_up(max_t(u32
, subprog
[idx
].stack_depth
, 1), 32);
1739 i
= ret_insn
[frame
];
1740 idx
= ret_prog
[frame
];
1744 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1745 static int get_callee_stack_depth(struct bpf_verifier_env
*env
,
1746 const struct bpf_insn
*insn
, int idx
)
1748 int start
= idx
+ insn
->imm
+ 1, subprog
;
1750 subprog
= find_subprog(env
, start
);
1752 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
1756 return env
->subprog_info
[subprog
].stack_depth
;
1760 static int check_ctx_reg(struct bpf_verifier_env
*env
,
1761 const struct bpf_reg_state
*reg
, int regno
)
1763 /* Access to ctx or passing it to a helper is only allowed in
1764 * its original, unmodified form.
1768 verbose(env
, "dereference of modified ctx ptr R%d off=%d disallowed\n",
1773 if (!tnum_is_const(reg
->var_off
) || reg
->var_off
.value
) {
1776 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1777 verbose(env
, "variable ctx access var_off=%s disallowed\n", tn_buf
);
/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
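/* Illustrative worked example (numbers not from the original source):
 * truncating to size == 1 uses mask == 0xff. A register with
 * [umin, umax] == [0x103, 0x10f] has identical bits above the mask, so the
 * bounds simply become [0x03, 0x0f]. A register with [0xfe, 0x105] wraps
 * within the low byte, so the bounds are widened to the full [0x00, 0xff].
 */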
1787 static void coerce_reg_to_size(struct bpf_reg_state
*reg
, int size
)
1791 /* clear high bits in bit representation */
1792 reg
->var_off
= tnum_cast(reg
->var_off
, size
);
1794 /* fix arithmetic bounds */
1795 mask
= ((u64
)1 << (size
* 8)) - 1;
1796 if ((reg
->umin_value
& ~mask
) == (reg
->umax_value
& ~mask
)) {
1797 reg
->umin_value
&= mask
;
1798 reg
->umax_value
&= mask
;
1800 reg
->umin_value
= 0;
1801 reg
->umax_value
= mask
;
1803 reg
->smin_value
= reg
->umin_value
;
1804 reg
->smax_value
= reg
->umax_value
;
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
1813 static int check_mem_access(struct bpf_verifier_env
*env
, int insn_idx
, u32 regno
,
1814 int off
, int bpf_size
, enum bpf_access_type t
,
1815 int value_regno
, bool strict_alignment_once
)
1817 struct bpf_reg_state
*regs
= cur_regs(env
);
1818 struct bpf_reg_state
*reg
= regs
+ regno
;
1819 struct bpf_func_state
*state
;
1822 size
= bpf_size_to_bytes(bpf_size
);
1826 /* alignment checks will add in reg->off themselves */
1827 err
= check_ptr_alignment(env
, reg
, off
, size
, strict_alignment_once
);
1831 /* for access checks, reg->off is just part of off */
1834 if (reg
->type
== PTR_TO_MAP_VALUE
) {
1835 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1836 is_pointer_value(env
, value_regno
)) {
1837 verbose(env
, "R%d leaks addr into map\n", value_regno
);
1841 err
= check_map_access(env
, regno
, off
, size
, false);
1842 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
1843 mark_reg_unknown(env
, regs
, value_regno
);
1845 } else if (reg
->type
== PTR_TO_CTX
) {
1846 enum bpf_reg_type reg_type
= SCALAR_VALUE
;
1848 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1849 is_pointer_value(env
, value_regno
)) {
1850 verbose(env
, "R%d leaks addr into ctx\n", value_regno
);
1854 err
= check_ctx_reg(env
, reg
, regno
);
1858 err
= check_ctx_access(env
, insn_idx
, off
, size
, t
, ®_type
);
1859 if (!err
&& t
== BPF_READ
&& value_regno
>= 0) {
1860 /* ctx access returns either a scalar, or a
1861 * PTR_TO_PACKET[_META,_END]. In the latter
1862 * case, we know the offset is zero.
1864 if (reg_type
== SCALAR_VALUE
)
1865 mark_reg_unknown(env
, regs
, value_regno
);
1867 mark_reg_known_zero(env
, regs
,
1869 regs
[value_regno
].type
= reg_type
;
1872 } else if (reg
->type
== PTR_TO_STACK
) {
1873 /* stack accesses must be at a fixed offset, so that we can
1874 * determine what type of data were returned.
1875 * See check_stack_read().
1877 if (!tnum_is_const(reg
->var_off
)) {
1880 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
1881 verbose(env
, "variable stack access var_off=%s off=%d size=%d",
1885 off
+= reg
->var_off
.value
;
1886 if (off
>= 0 || off
< -MAX_BPF_STACK
) {
1887 verbose(env
, "invalid stack off=%d size=%d\n", off
,
1892 state
= func(env
, reg
);
1893 err
= update_stack_depth(env
, state
, off
);
1898 err
= check_stack_write(env
, state
, off
, size
,
1899 value_regno
, insn_idx
);
1901 err
= check_stack_read(env
, state
, off
, size
,
1903 } else if (reg_is_pkt_pointer(reg
)) {
1904 if (t
== BPF_WRITE
&& !may_access_direct_pkt_data(env
, NULL
, t
)) {
1905 verbose(env
, "cannot write into packet\n");
1908 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1909 is_pointer_value(env
, value_regno
)) {
1910 verbose(env
, "R%d leaks addr into packet\n",
1914 err
= check_packet_access(env
, regno
, off
, size
, false);
1915 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
1916 mark_reg_unknown(env
, regs
, value_regno
);
1917 } else if (reg
->type
== PTR_TO_FLOW_KEYS
) {
1918 if (t
== BPF_WRITE
&& value_regno
>= 0 &&
1919 is_pointer_value(env
, value_regno
)) {
1920 verbose(env
, "R%d leaks addr into flow keys\n",
1925 err
= check_flow_keys_access(env
, off
, size
);
1926 if (!err
&& t
== BPF_READ
&& value_regno
>= 0)
1927 mark_reg_unknown(env
, regs
, value_regno
);
1928 } else if (reg
->type
== PTR_TO_SOCKET
) {
1929 if (t
== BPF_WRITE
) {
1930 verbose(env
, "cannot write into socket\n");
1933 err
= check_sock_access(env
, regno
, off
, size
, t
);
1934 if (!err
&& value_regno
>= 0)
1935 mark_reg_unknown(env
, regs
, value_regno
);
1937 verbose(env
, "R%d invalid mem access '%s'\n", regno
,
1938 reg_type_str
[reg
->type
]);
1942 if (!err
&& size
< BPF_REG_SIZE
&& value_regno
>= 0 && t
== BPF_READ
&&
1943 regs
[value_regno
].type
== SCALAR_VALUE
) {
1944 /* b/h/w load zero-extends, mark upper bits as known 0 */
1945 coerce_reg_to_size(®s
[value_regno
], size
);
1950 static int check_xadd(struct bpf_verifier_env
*env
, int insn_idx
, struct bpf_insn
*insn
)
1954 if ((BPF_SIZE(insn
->code
) != BPF_W
&& BPF_SIZE(insn
->code
) != BPF_DW
) ||
1956 verbose(env
, "BPF_XADD uses reserved fields\n");
1960 /* check src1 operand */
1961 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
1965 /* check src2 operand */
1966 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
1970 if (is_pointer_value(env
, insn
->src_reg
)) {
1971 verbose(env
, "R%d leaks addr into mem\n", insn
->src_reg
);
1975 if (is_ctx_reg(env
, insn
->dst_reg
) ||
1976 is_pkt_reg(env
, insn
->dst_reg
) ||
1977 is_flow_key_reg(env
, insn
->dst_reg
)) {
1978 verbose(env
, "BPF_XADD stores into R%d %s is not allowed\n",
1980 reg_type_str
[reg_state(env
, insn
->dst_reg
)->type
]);
1984 /* check whether atomic_add can read the memory */
1985 err
= check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
1986 BPF_SIZE(insn
->code
), BPF_READ
, -1, true);
1990 /* check whether atomic_add can write into the same memory */
1991 return check_mem_access(env
, insn_idx
, insn
->dst_reg
, insn
->off
,
1992 BPF_SIZE(insn
->code
), BPF_WRITE
, -1, true);
1995 /* when register 'regno' is passed into function that will read 'access_size'
1996 * bytes from that pointer, make sure that it's within stack boundary
1997 * and all elements of stack are initialized.
1998 * Unlike most pointer bounds-checking functions, this one doesn't take an
1999 * 'off' argument, so it has to add in reg->off itself.
2001 static int check_stack_boundary(struct bpf_verifier_env
*env
, int regno
,
2002 int access_size
, bool zero_size_allowed
,
2003 struct bpf_call_arg_meta
*meta
)
2005 struct bpf_reg_state
*reg
= reg_state(env
, regno
);
2006 struct bpf_func_state
*state
= func(env
, reg
);
2007 int off
, i
, slot
, spi
;
2009 if (reg
->type
!= PTR_TO_STACK
) {
2010 /* Allow zero-byte read from NULL, regardless of pointer type */
2011 if (zero_size_allowed
&& access_size
== 0 &&
2012 register_is_null(reg
))
2015 verbose(env
, "R%d type=%s expected=%s\n", regno
,
2016 reg_type_str
[reg
->type
],
2017 reg_type_str
[PTR_TO_STACK
]);
2021 /* Only allow fixed-offset stack reads */
2022 if (!tnum_is_const(reg
->var_off
)) {
2025 tnum_strn(tn_buf
, sizeof(tn_buf
), reg
->var_off
);
2026 verbose(env
, "invalid variable stack read R%d var_off=%s\n",
2030 off
= reg
->off
+ reg
->var_off
.value
;
2031 if (off
>= 0 || off
< -MAX_BPF_STACK
|| off
+ access_size
> 0 ||
2032 access_size
< 0 || (access_size
== 0 && !zero_size_allowed
)) {
2033 verbose(env
, "invalid stack type R%d off=%d access_size=%d\n",
2034 regno
, off
, access_size
);
2038 if (meta
&& meta
->raw_mode
) {
2039 meta
->access_size
= access_size
;
2040 meta
->regno
= regno
;
2044 for (i
= 0; i
< access_size
; i
++) {
2047 slot
= -(off
+ i
) - 1;
2048 spi
= slot
/ BPF_REG_SIZE
;
2049 if (state
->allocated_stack
<= slot
)
2051 stype
= &state
->stack
[spi
].slot_type
[slot
% BPF_REG_SIZE
];
2052 if (*stype
== STACK_MISC
)
2054 if (*stype
== STACK_ZERO
) {
2055 /* helper can write anything into the stack */
2056 *stype
= STACK_MISC
;
2060 verbose(env
, "invalid indirect read from stack off %d+%d size %d\n",
2061 off
, i
, access_size
);
2064 /* reading any byte out of 8-byte 'spill_slot' will cause
2065 * the whole slot to be marked as 'read'
2067 mark_reg_read(env
, &state
->stack
[spi
].spilled_ptr
,
2068 state
->stack
[spi
].spilled_ptr
.parent
);
2070 return update_stack_depth(env
, state
, off
);
2073 static int check_helper_mem_access(struct bpf_verifier_env
*env
, int regno
,
2074 int access_size
, bool zero_size_allowed
,
2075 struct bpf_call_arg_meta
*meta
)
2077 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
2079 switch (reg
->type
) {
2081 case PTR_TO_PACKET_META
:
2082 return check_packet_access(env
, regno
, reg
->off
, access_size
,
2084 case PTR_TO_MAP_VALUE
:
2085 return check_map_access(env
, regno
, reg
->off
, access_size
,
2087 default: /* scalar_value|ptr_to_stack or invalid ptr */
2088 return check_stack_boundary(env
, regno
, access_size
,
2089 zero_size_allowed
, meta
);
2093 static bool arg_type_is_mem_ptr(enum bpf_arg_type type
)
2095 return type
== ARG_PTR_TO_MEM
||
2096 type
== ARG_PTR_TO_MEM_OR_NULL
||
2097 type
== ARG_PTR_TO_UNINIT_MEM
;
2100 static bool arg_type_is_mem_size(enum bpf_arg_type type
)
2102 return type
== ARG_CONST_SIZE
||
2103 type
== ARG_CONST_SIZE_OR_ZERO
;
2106 static int check_func_arg(struct bpf_verifier_env
*env
, u32 regno
,
2107 enum bpf_arg_type arg_type
,
2108 struct bpf_call_arg_meta
*meta
)
2110 struct bpf_reg_state
*regs
= cur_regs(env
), *reg
= ®s
[regno
];
2111 enum bpf_reg_type expected_type
, type
= reg
->type
;
2114 if (arg_type
== ARG_DONTCARE
)
2117 err
= check_reg_arg(env
, regno
, SRC_OP
);
2121 if (arg_type
== ARG_ANYTHING
) {
2122 if (is_pointer_value(env
, regno
)) {
2123 verbose(env
, "R%d leaks addr into helper function\n",
2130 if (type_is_pkt_pointer(type
) &&
2131 !may_access_direct_pkt_data(env
, meta
, BPF_READ
)) {
2132 verbose(env
, "helper access to the packet is not allowed\n");
2136 if (arg_type
== ARG_PTR_TO_MAP_KEY
||
2137 arg_type
== ARG_PTR_TO_MAP_VALUE
||
2138 arg_type
== ARG_PTR_TO_UNINIT_MAP_VALUE
) {
2139 expected_type
= PTR_TO_STACK
;
2140 if (!type_is_pkt_pointer(type
) && type
!= PTR_TO_MAP_VALUE
&&
2141 type
!= expected_type
)
2143 } else if (arg_type
== ARG_CONST_SIZE
||
2144 arg_type
== ARG_CONST_SIZE_OR_ZERO
) {
2145 expected_type
= SCALAR_VALUE
;
2146 if (type
!= expected_type
)
2148 } else if (arg_type
== ARG_CONST_MAP_PTR
) {
2149 expected_type
= CONST_PTR_TO_MAP
;
2150 if (type
!= expected_type
)
2152 } else if (arg_type
== ARG_PTR_TO_CTX
) {
2153 expected_type
= PTR_TO_CTX
;
2154 if (type
!= expected_type
)
2156 err
= check_ctx_reg(env
, reg
, regno
);
2159 } else if (arg_type
== ARG_PTR_TO_SOCKET
) {
2160 expected_type
= PTR_TO_SOCKET
;
2161 if (type
!= expected_type
)
2163 if (meta
->ptr_id
|| !reg
->id
) {
2164 verbose(env
, "verifier internal error: mismatched references meta=%d, reg=%d\n",
2165 meta
->ptr_id
, reg
->id
);
2168 meta
->ptr_id
= reg
->id
;
2169 } else if (arg_type_is_mem_ptr(arg_type
)) {
2170 expected_type
= PTR_TO_STACK
;
2171 /* One exception here. In case function allows for NULL to be
2172 * passed in as argument, it's a SCALAR_VALUE type. Final test
2173 * happens during stack boundary checking.
2175 if (register_is_null(reg
) &&
2176 arg_type
== ARG_PTR_TO_MEM_OR_NULL
)
2177 /* final test in check_stack_boundary() */;
2178 else if (!type_is_pkt_pointer(type
) &&
2179 type
!= PTR_TO_MAP_VALUE
&&
2180 type
!= expected_type
)
2182 meta
->raw_mode
= arg_type
== ARG_PTR_TO_UNINIT_MEM
;
2184 verbose(env
, "unsupported arg_type %d\n", arg_type
);
2188 if (arg_type
== ARG_CONST_MAP_PTR
) {
2189 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
2190 meta
->map_ptr
= reg
->map_ptr
;
2191 } else if (arg_type
== ARG_PTR_TO_MAP_KEY
) {
2192 /* bpf_map_xxx(..., map_ptr, ..., key) call:
2193 * check that [key, key + map->key_size) are within
2194 * stack limits and initialized
2196 if (!meta
->map_ptr
) {
2197 /* in function declaration map_ptr must come before
2198 * map_key, so that it's verified and known before
2199 * we have to check map_key here. Otherwise it means
2200 * that kernel subsystem misconfigured verifier
2202 verbose(env
, "invalid map_ptr to access map->key\n");
2205 err
= check_helper_mem_access(env
, regno
,
2206 meta
->map_ptr
->key_size
, false,
2208 } else if (arg_type
== ARG_PTR_TO_MAP_VALUE
||
2209 arg_type
== ARG_PTR_TO_UNINIT_MAP_VALUE
) {
2210 /* bpf_map_xxx(..., map_ptr, ..., value) call:
2211 * check [value, value + map->value_size) validity
2213 if (!meta
->map_ptr
) {
2214 /* kernel subsystem misconfigured verifier */
2215 verbose(env
, "invalid map_ptr to access map->value\n");
2218 meta
->raw_mode
= (arg_type
== ARG_PTR_TO_UNINIT_MAP_VALUE
);
2219 err
= check_helper_mem_access(env
, regno
,
2220 meta
->map_ptr
->value_size
, false,
2222 } else if (arg_type_is_mem_size(arg_type
)) {
2223 bool zero_size_allowed
= (arg_type
== ARG_CONST_SIZE_OR_ZERO
);
2225 /* remember the mem_size which may be used later
2226 * to refine return values.
2228 meta
->msize_smax_value
= reg
->smax_value
;
2229 meta
->msize_umax_value
= reg
->umax_value
;
2231 /* The register is SCALAR_VALUE; the access check
2232 * happens using its boundaries.
2234 if (!tnum_is_const(reg
->var_off
))
2235 /* For unprivileged variable accesses, disable raw
2236 * mode so that the program is required to
2237 * initialize all the memory that the helper could
2238 * just partially fill up.
2242 if (reg
->smin_value
< 0) {
2243 verbose(env
, "R%d min value is negative, either use unsigned or 'var &= const'\n",
2248 if (reg
->umin_value
== 0) {
2249 err
= check_helper_mem_access(env
, regno
- 1, 0,
2256 if (reg
->umax_value
>= BPF_MAX_VAR_SIZ
) {
2257 verbose(env
, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
2261 err
= check_helper_mem_access(env
, regno
- 1,
2263 zero_size_allowed
, meta
);
2268 verbose(env
, "R%d type=%s expected=%s\n", regno
,
2269 reg_type_str
[type
], reg_type_str
[expected_type
]);
2273 static int check_map_func_compatibility(struct bpf_verifier_env
*env
,
2274 struct bpf_map
*map
, int func_id
)
2279 /* We need a two way check, first is from map perspective ... */
2280 switch (map
->map_type
) {
2281 case BPF_MAP_TYPE_PROG_ARRAY
:
2282 if (func_id
!= BPF_FUNC_tail_call
)
2285 case BPF_MAP_TYPE_PERF_EVENT_ARRAY
:
2286 if (func_id
!= BPF_FUNC_perf_event_read
&&
2287 func_id
!= BPF_FUNC_perf_event_output
&&
2288 func_id
!= BPF_FUNC_perf_event_read_value
)
2291 case BPF_MAP_TYPE_STACK_TRACE
:
2292 if (func_id
!= BPF_FUNC_get_stackid
)
2295 case BPF_MAP_TYPE_CGROUP_ARRAY
:
2296 if (func_id
!= BPF_FUNC_skb_under_cgroup
&&
2297 func_id
!= BPF_FUNC_current_task_under_cgroup
)
2300 case BPF_MAP_TYPE_CGROUP_STORAGE
:
2301 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
:
2302 if (func_id
!= BPF_FUNC_get_local_storage
)
2305 /* devmap returns a pointer to a live net_device ifindex that we cannot
2306 * allow to be modified from bpf side. So do not allow lookup elements
2309 case BPF_MAP_TYPE_DEVMAP
:
2310 if (func_id
!= BPF_FUNC_redirect_map
)
2313 /* Restrict bpf side of cpumap and xskmap, open when use-cases
2316 case BPF_MAP_TYPE_CPUMAP
:
2317 case BPF_MAP_TYPE_XSKMAP
:
2318 if (func_id
!= BPF_FUNC_redirect_map
)
2321 case BPF_MAP_TYPE_ARRAY_OF_MAPS
:
2322 case BPF_MAP_TYPE_HASH_OF_MAPS
:
2323 if (func_id
!= BPF_FUNC_map_lookup_elem
)
2326 case BPF_MAP_TYPE_SOCKMAP
:
2327 if (func_id
!= BPF_FUNC_sk_redirect_map
&&
2328 func_id
!= BPF_FUNC_sock_map_update
&&
2329 func_id
!= BPF_FUNC_map_delete_elem
&&
2330 func_id
!= BPF_FUNC_msg_redirect_map
)
2333 case BPF_MAP_TYPE_SOCKHASH
:
2334 if (func_id
!= BPF_FUNC_sk_redirect_hash
&&
2335 func_id
!= BPF_FUNC_sock_hash_update
&&
2336 func_id
!= BPF_FUNC_map_delete_elem
&&
2337 func_id
!= BPF_FUNC_msg_redirect_hash
)
2340 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
:
2341 if (func_id
!= BPF_FUNC_sk_select_reuseport
)
2344 case BPF_MAP_TYPE_QUEUE
:
2345 case BPF_MAP_TYPE_STACK
:
2346 if (func_id
!= BPF_FUNC_map_peek_elem
&&
2347 func_id
!= BPF_FUNC_map_pop_elem
&&
2348 func_id
!= BPF_FUNC_map_push_elem
)
2355 /* ... and second from the function itself. */
2357 case BPF_FUNC_tail_call
:
2358 if (map
->map_type
!= BPF_MAP_TYPE_PROG_ARRAY
)
2360 if (env
->subprog_cnt
> 1) {
2361 verbose(env
, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
2365 case BPF_FUNC_perf_event_read
:
2366 case BPF_FUNC_perf_event_output
:
2367 case BPF_FUNC_perf_event_read_value
:
2368 if (map
->map_type
!= BPF_MAP_TYPE_PERF_EVENT_ARRAY
)
2371 case BPF_FUNC_get_stackid
:
2372 if (map
->map_type
!= BPF_MAP_TYPE_STACK_TRACE
)
2375 case BPF_FUNC_current_task_under_cgroup
:
2376 case BPF_FUNC_skb_under_cgroup
:
2377 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_ARRAY
)
2380 case BPF_FUNC_redirect_map
:
2381 if (map
->map_type
!= BPF_MAP_TYPE_DEVMAP
&&
2382 map
->map_type
!= BPF_MAP_TYPE_CPUMAP
&&
2383 map
->map_type
!= BPF_MAP_TYPE_XSKMAP
)
2386 case BPF_FUNC_sk_redirect_map
:
2387 case BPF_FUNC_msg_redirect_map
:
2388 case BPF_FUNC_sock_map_update
:
2389 if (map
->map_type
!= BPF_MAP_TYPE_SOCKMAP
)
2392 case BPF_FUNC_sk_redirect_hash
:
2393 case BPF_FUNC_msg_redirect_hash
:
2394 case BPF_FUNC_sock_hash_update
:
2395 if (map
->map_type
!= BPF_MAP_TYPE_SOCKHASH
)
2398 case BPF_FUNC_get_local_storage
:
2399 if (map
->map_type
!= BPF_MAP_TYPE_CGROUP_STORAGE
&&
2400 map
->map_type
!= BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE
)
2403 case BPF_FUNC_sk_select_reuseport
:
2404 if (map
->map_type
!= BPF_MAP_TYPE_REUSEPORT_SOCKARRAY
)
2407 case BPF_FUNC_map_peek_elem
:
2408 case BPF_FUNC_map_pop_elem
:
2409 case BPF_FUNC_map_push_elem
:
2410 if (map
->map_type
!= BPF_MAP_TYPE_QUEUE
&&
2411 map
->map_type
!= BPF_MAP_TYPE_STACK
)
2420 verbose(env
, "cannot pass map_type %d into func %s#%d\n",
2421 map
->map_type
, func_id_name(func_id
), func_id
);
2425 static bool check_raw_mode_ok(const struct bpf_func_proto
*fn
)
2429 if (fn
->arg1_type
== ARG_PTR_TO_UNINIT_MEM
)
2431 if (fn
->arg2_type
== ARG_PTR_TO_UNINIT_MEM
)
2433 if (fn
->arg3_type
== ARG_PTR_TO_UNINIT_MEM
)
2435 if (fn
->arg4_type
== ARG_PTR_TO_UNINIT_MEM
)
2437 if (fn
->arg5_type
== ARG_PTR_TO_UNINIT_MEM
)
2440 /* We only support one arg being in raw mode at the moment,
2441 * which is sufficient for the helper functions we have
2447 static bool check_args_pair_invalid(enum bpf_arg_type arg_curr
,
2448 enum bpf_arg_type arg_next
)
2450 return (arg_type_is_mem_ptr(arg_curr
) &&
2451 !arg_type_is_mem_size(arg_next
)) ||
2452 (!arg_type_is_mem_ptr(arg_curr
) &&
2453 arg_type_is_mem_size(arg_next
));
2456 static bool check_arg_pair_ok(const struct bpf_func_proto
*fn
)
2458 /* bpf_xxx(..., buf, len) call will access 'len'
2459 * bytes from memory 'buf'. Both arg types need
2460 * to be paired, so make sure there's no buggy
2461 * helper function specification.
2463 if (arg_type_is_mem_size(fn
->arg1_type
) ||
2464 arg_type_is_mem_ptr(fn
->arg5_type
) ||
2465 check_args_pair_invalid(fn
->arg1_type
, fn
->arg2_type
) ||
2466 check_args_pair_invalid(fn
->arg2_type
, fn
->arg3_type
) ||
2467 check_args_pair_invalid(fn
->arg3_type
, fn
->arg4_type
) ||
2468 check_args_pair_invalid(fn
->arg4_type
, fn
->arg5_type
))
2474 static bool check_refcount_ok(const struct bpf_func_proto
*fn
)
2478 if (arg_type_is_refcounted(fn
->arg1_type
))
2480 if (arg_type_is_refcounted(fn
->arg2_type
))
2482 if (arg_type_is_refcounted(fn
->arg3_type
))
2484 if (arg_type_is_refcounted(fn
->arg4_type
))
2486 if (arg_type_is_refcounted(fn
->arg5_type
))
2489 /* We only support one arg being unreferenced at the moment,
2490 * which is sufficient for the helper functions we have right now.
2495 static int check_func_proto(const struct bpf_func_proto
*fn
)
2497 return check_raw_mode_ok(fn
) &&
2498 check_arg_pair_ok(fn
) &&
2499 check_refcount_ok(fn
) ? 0 : -EINVAL
;
2502 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
2503 * are now invalid, so turn them into unknown SCALAR_VALUE.
2505 static void __clear_all_pkt_pointers(struct bpf_verifier_env
*env
,
2506 struct bpf_func_state
*state
)
2508 struct bpf_reg_state
*regs
= state
->regs
, *reg
;
2511 for (i
= 0; i
< MAX_BPF_REG
; i
++)
2512 if (reg_is_pkt_pointer_any(®s
[i
]))
2513 mark_reg_unknown(env
, regs
, i
);
2515 bpf_for_each_spilled_reg(i
, state
, reg
) {
2518 if (reg_is_pkt_pointer_any(reg
))
2519 __mark_reg_unknown(reg
);
2523 static void clear_all_pkt_pointers(struct bpf_verifier_env
*env
)
2525 struct bpf_verifier_state
*vstate
= env
->cur_state
;
2528 for (i
= 0; i
<= vstate
->curframe
; i
++)
2529 __clear_all_pkt_pointers(env
, vstate
->frame
[i
]);
2532 static void release_reg_references(struct bpf_verifier_env
*env
,
2533 struct bpf_func_state
*state
, int id
)
2535 struct bpf_reg_state
*regs
= state
->regs
, *reg
;
2538 for (i
= 0; i
< MAX_BPF_REG
; i
++)
2539 if (regs
[i
].id
== id
)
2540 mark_reg_unknown(env
, regs
, i
);
2542 bpf_for_each_spilled_reg(i
, state
, reg
) {
2545 if (reg_is_refcounted(reg
) && reg
->id
== id
)
2546 __mark_reg_unknown(reg
);
2550 /* The pointer with the specified id has released its reference to kernel
2551 * resources. Identify all copies of the same pointer and clear the reference.
2553 static int release_reference(struct bpf_verifier_env
*env
,
2554 struct bpf_call_arg_meta
*meta
)
2556 struct bpf_verifier_state
*vstate
= env
->cur_state
;
2559 for (i
= 0; i
<= vstate
->curframe
; i
++)
2560 release_reg_references(env
, vstate
->frame
[i
], meta
->ptr_id
);
2562 return release_reference_state(env
, meta
->ptr_id
);
2565 static int check_func_call(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
,
2568 struct bpf_verifier_state
*state
= env
->cur_state
;
2569 struct bpf_func_state
*caller
, *callee
;
2570 int i
, err
, subprog
, target_insn
;
2572 if (state
->curframe
+ 1 >= MAX_CALL_FRAMES
) {
2573 verbose(env
, "the call stack of %d frames is too deep\n",
2574 state
->curframe
+ 2);
2578 target_insn
= *insn_idx
+ insn
->imm
;
2579 subprog
= find_subprog(env
, target_insn
+ 1);
2581 verbose(env
, "verifier bug. No program starts at insn %d\n",
2586 caller
= state
->frame
[state
->curframe
];
2587 if (state
->frame
[state
->curframe
+ 1]) {
2588 verbose(env
, "verifier bug. Frame %d already allocated\n",
2589 state
->curframe
+ 1);
2593 callee
= kzalloc(sizeof(*callee
), GFP_KERNEL
);
2596 state
->frame
[state
->curframe
+ 1] = callee
;
2598 /* callee cannot access r0, r6 - r9 for reading and has to write
2599 * into its own stack before reading from it.
2600 * callee can read/write into caller's stack
2602 init_func_state(env
, callee
,
2603 /* remember the callsite, it will be used by bpf_exit */
2604 *insn_idx
/* callsite */,
2605 state
->curframe
+ 1 /* frameno within this callchain */,
2606 subprog
/* subprog number within this prog */);
2608 /* Transfer references to the callee */
2609 err
= transfer_reference_state(callee
, caller
);
2613 /* copy r1 - r5 args that callee can access. The copy includes parent
2614 * pointers, which connects us up to the liveness chain
2616 for (i
= BPF_REG_1
; i
<= BPF_REG_5
; i
++)
2617 callee
->regs
[i
] = caller
->regs
[i
];
2619 /* after the call registers r0 - r5 were scratched */
2620 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++) {
2621 mark_reg_not_init(env
, caller
->regs
, caller_saved
[i
]);
2622 check_reg_arg(env
, caller_saved
[i
], DST_OP_NO_MARK
);
2625 /* only increment it after check_reg_arg() finished */
2628 /* and go analyze first insn of the callee */
2629 *insn_idx
= target_insn
;
2631 if (env
->log
.level
) {
2632 verbose(env
, "caller:\n");
2633 print_verifier_state(env
, caller
);
2634 verbose(env
, "callee:\n");
2635 print_verifier_state(env
, callee
);
2640 static int prepare_func_exit(struct bpf_verifier_env
*env
, int *insn_idx
)
2642 struct bpf_verifier_state
*state
= env
->cur_state
;
2643 struct bpf_func_state
*caller
, *callee
;
2644 struct bpf_reg_state
*r0
;
2647 callee
= state
->frame
[state
->curframe
];
2648 r0
= &callee
->regs
[BPF_REG_0
];
2649 if (r0
->type
== PTR_TO_STACK
) {
2650 /* technically it's ok to return caller's stack pointer
2651 * (or caller's caller's pointer) back to the caller,
2652 * since these pointers are valid. Only current stack
2653 * pointer will be invalid as soon as function exits,
2654 * but let's be conservative
2656 verbose(env
, "cannot return stack pointer to the caller\n");
2661 caller
= state
->frame
[state
->curframe
];
2662 /* return to the caller whatever r0 had in the callee */
2663 caller
->regs
[BPF_REG_0
] = *r0
;
2665 /* Transfer references to the caller */
2666 err
= transfer_reference_state(caller
, callee
);
2670 *insn_idx
= callee
->callsite
+ 1;
2671 if (env
->log
.level
) {
2672 verbose(env
, "returning from callee:\n");
2673 print_verifier_state(env
, callee
);
2674 verbose(env
, "to caller at %d:\n", *insn_idx
);
2675 print_verifier_state(env
, caller
);
2677 /* clear everything in the callee */
2678 free_func_state(callee
);
2679 state
->frame
[state
->curframe
+ 1] = NULL
;
2683 static void do_refine_retval_range(struct bpf_reg_state
*regs
, int ret_type
,
2685 struct bpf_call_arg_meta
*meta
)
2687 struct bpf_reg_state
*ret_reg
= ®s
[BPF_REG_0
];
2689 if (ret_type
!= RET_INTEGER
||
2690 (func_id
!= BPF_FUNC_get_stack
&&
2691 func_id
!= BPF_FUNC_probe_read_str
))
2694 ret_reg
->smax_value
= meta
->msize_smax_value
;
2695 ret_reg
->umax_value
= meta
->msize_umax_value
;
2696 __reg_deduce_bounds(ret_reg
);
2697 __reg_bound_offset(ret_reg
);
2701 record_func_map(struct bpf_verifier_env
*env
, struct bpf_call_arg_meta
*meta
,
2702 int func_id
, int insn_idx
)
2704 struct bpf_insn_aux_data
*aux
= &env
->insn_aux_data
[insn_idx
];
2706 if (func_id
!= BPF_FUNC_tail_call
&&
2707 func_id
!= BPF_FUNC_map_lookup_elem
&&
2708 func_id
!= BPF_FUNC_map_update_elem
&&
2709 func_id
!= BPF_FUNC_map_delete_elem
&&
2710 func_id
!= BPF_FUNC_map_push_elem
&&
2711 func_id
!= BPF_FUNC_map_pop_elem
&&
2712 func_id
!= BPF_FUNC_map_peek_elem
)
2715 if (meta
->map_ptr
== NULL
) {
2716 verbose(env
, "kernel subsystem misconfigured verifier\n");
2720 if (!BPF_MAP_PTR(aux
->map_state
))
2721 bpf_map_ptr_store(aux
, meta
->map_ptr
,
2722 meta
->map_ptr
->unpriv_array
);
2723 else if (BPF_MAP_PTR(aux
->map_state
) != meta
->map_ptr
)
2724 bpf_map_ptr_store(aux
, BPF_MAP_PTR_POISON
,
2725 meta
->map_ptr
->unpriv_array
);
2729 static int check_reference_leak(struct bpf_verifier_env
*env
)
2731 struct bpf_func_state
*state
= cur_func(env
);
2734 for (i
= 0; i
< state
->acquired_refs
; i
++) {
2735 verbose(env
, "Unreleased reference id=%d alloc_insn=%d\n",
2736 state
->refs
[i
].id
, state
->refs
[i
].insn_idx
);
2738 return state
->acquired_refs
? -EINVAL
: 0;
2741 static int check_helper_call(struct bpf_verifier_env
*env
, int func_id
, int insn_idx
)
2743 const struct bpf_func_proto
*fn
= NULL
;
2744 struct bpf_reg_state
*regs
;
2745 struct bpf_call_arg_meta meta
;
2749 /* find function prototype */
2750 if (func_id
< 0 || func_id
>= __BPF_FUNC_MAX_ID
) {
2751 verbose(env
, "invalid func %s#%d\n", func_id_name(func_id
),
2756 if (env
->ops
->get_func_proto
)
2757 fn
= env
->ops
->get_func_proto(func_id
, env
->prog
);
2759 verbose(env
, "unknown func %s#%d\n", func_id_name(func_id
),
2764 /* eBPF programs must be GPL compatible to use GPL-ed functions */
2765 if (!env
->prog
->gpl_compatible
&& fn
->gpl_only
) {
2766 verbose(env
, "cannot call GPL-restricted function from non-GPL compatible program\n");
2770 /* With LD_ABS/IND some JITs save/restore skb from r1. */
2771 changes_data
= bpf_helper_changes_pkt_data(fn
->func
);
2772 if (changes_data
&& fn
->arg1_type
!= ARG_PTR_TO_CTX
) {
2773 verbose(env
, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
2774 func_id_name(func_id
), func_id
);
2778 memset(&meta
, 0, sizeof(meta
));
2779 meta
.pkt_access
= fn
->pkt_access
;
2781 err
= check_func_proto(fn
);
2783 verbose(env
, "kernel subsystem misconfigured func %s#%d\n",
2784 func_id_name(func_id
), func_id
);
2789 err
= check_func_arg(env
, BPF_REG_1
, fn
->arg1_type
, &meta
);
2792 err
= check_func_arg(env
, BPF_REG_2
, fn
->arg2_type
, &meta
);
2795 err
= check_func_arg(env
, BPF_REG_3
, fn
->arg3_type
, &meta
);
2798 err
= check_func_arg(env
, BPF_REG_4
, fn
->arg4_type
, &meta
);
2801 err
= check_func_arg(env
, BPF_REG_5
, fn
->arg5_type
, &meta
);
2805 err
= record_func_map(env
, &meta
, func_id
, insn_idx
);
2809 /* Mark slots with STACK_MISC in case of raw mode, stack offset
2810 * is inferred from register state.
2812 for (i
= 0; i
< meta
.access_size
; i
++) {
2813 err
= check_mem_access(env
, insn_idx
, meta
.regno
, i
, BPF_B
,
2814 BPF_WRITE
, -1, false);
2819 if (func_id
== BPF_FUNC_tail_call
) {
2820 err
= check_reference_leak(env
);
2822 verbose(env
, "tail_call would lead to reference leak\n");
2825 } else if (is_release_function(func_id
)) {
2826 err
= release_reference(env
, &meta
);
2831 regs
= cur_regs(env
);
2833 /* check that flags argument in get_local_storage(map, flags) is 0,
2834 * this is required because get_local_storage() can't return an error.
2836 if (func_id
== BPF_FUNC_get_local_storage
&&
2837 !register_is_null(®s
[BPF_REG_2
])) {
2838 verbose(env
, "get_local_storage() doesn't support non-zero flags\n");
2842 /* reset caller saved regs */
2843 for (i
= 0; i
< CALLER_SAVED_REGS
; i
++) {
2844 mark_reg_not_init(env
, regs
, caller_saved
[i
]);
2845 check_reg_arg(env
, caller_saved
[i
], DST_OP_NO_MARK
);
2848 /* update return register (already marked as written above) */
2849 if (fn
->ret_type
== RET_INTEGER
) {
2850 /* sets type to SCALAR_VALUE */
2851 mark_reg_unknown(env
, regs
, BPF_REG_0
);
2852 } else if (fn
->ret_type
== RET_VOID
) {
2853 regs
[BPF_REG_0
].type
= NOT_INIT
;
2854 } else if (fn
->ret_type
== RET_PTR_TO_MAP_VALUE_OR_NULL
||
2855 fn
->ret_type
== RET_PTR_TO_MAP_VALUE
) {
2856 /* There is no offset yet applied, variable or fixed */
2857 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
2858 /* remember map_ptr, so that check_map_access()
2859 * can check 'value_size' boundary of memory access
2860 * to map element returned from bpf_map_lookup_elem()
2862 if (meta
.map_ptr
== NULL
) {
2864 "kernel subsystem misconfigured verifier\n");
2867 regs
[BPF_REG_0
].map_ptr
= meta
.map_ptr
;
2868 if (fn
->ret_type
== RET_PTR_TO_MAP_VALUE
) {
2869 regs
[BPF_REG_0
].type
= PTR_TO_MAP_VALUE
;
2871 regs
[BPF_REG_0
].type
= PTR_TO_MAP_VALUE_OR_NULL
;
2872 regs
[BPF_REG_0
].id
= ++env
->id_gen
;
2874 } else if (fn
->ret_type
== RET_PTR_TO_SOCKET_OR_NULL
) {
2875 int id
= acquire_reference_state(env
, insn_idx
);
2878 mark_reg_known_zero(env
, regs
, BPF_REG_0
);
2879 regs
[BPF_REG_0
].type
= PTR_TO_SOCKET_OR_NULL
;
2880 regs
[BPF_REG_0
].id
= id
;
2882 verbose(env
, "unknown return type %d of func %s#%d\n",
2883 fn
->ret_type
, func_id_name(func_id
), func_id
);
2887 do_refine_retval_range(regs
, fn
->ret_type
, func_id
, &meta
);
2889 err
= check_map_func_compatibility(env
, meta
.map_ptr
, func_id
);
2893 if (func_id
== BPF_FUNC_get_stack
&& !env
->prog
->has_callchain_buf
) {
2894 const char *err_str
;
2896 #ifdef CONFIG_PERF_EVENTS
2897 err
= get_callchain_buffers(sysctl_perf_event_max_stack
);
2898 err_str
= "cannot get callchain buffer for func %s#%d\n";
2901 err_str
= "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
2904 verbose(env
, err_str
, func_id_name(func_id
), func_id
);
2908 env
->prog
->has_callchain_buf
= true;
2912 clear_all_pkt_pointers(env
);
2916 static bool signed_add_overflows(s64 a
, s64 b
)
2918 /* Do the add in u64, where overflow is well-defined */
2919 s64 res
= (s64
)((u64
)a
+ (u64
)b
);
2926 static bool signed_sub_overflows(s64 a
, s64 b
)
2928 /* Do the sub in u64, where overflow is well-defined */
2929 s64 res
= (s64
)((u64
)a
- (u64
)b
);
2936 static bool check_reg_sane_offset(struct bpf_verifier_env
*env
,
2937 const struct bpf_reg_state
*reg
,
2938 enum bpf_reg_type type
)
2940 bool known
= tnum_is_const(reg
->var_off
);
2941 s64 val
= reg
->var_off
.value
;
2942 s64 smin
= reg
->smin_value
;
2944 if (known
&& (val
>= BPF_MAX_VAR_OFF
|| val
<= -BPF_MAX_VAR_OFF
)) {
2945 verbose(env
, "math between %s pointer and %lld is not allowed\n",
2946 reg_type_str
[type
], val
);
2950 if (reg
->off
>= BPF_MAX_VAR_OFF
|| reg
->off
<= -BPF_MAX_VAR_OFF
) {
2951 verbose(env
, "%s pointer offset %d is not allowed\n",
2952 reg_type_str
[type
], reg
->off
);
2956 if (smin
== S64_MIN
) {
2957 verbose(env
, "math between %s pointer and register with unbounded min value is not allowed\n",
2958 reg_type_str
[type
]);
2962 if (smin
>= BPF_MAX_VAR_OFF
|| smin
<= -BPF_MAX_VAR_OFF
) {
2963 verbose(env
, "value %lld makes %s pointer be out of bounds\n",
2964 smin
, reg_type_str
[type
]);
2971 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
2972 * Caller should also handle BPF_MOV case separately.
2973 * If we return -EACCES, caller may want to try again treating pointer as a
2974 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
2976 static int adjust_ptr_min_max_vals(struct bpf_verifier_env
*env
,
2977 struct bpf_insn
*insn
,
2978 const struct bpf_reg_state
*ptr_reg
,
2979 const struct bpf_reg_state
*off_reg
)
2981 struct bpf_verifier_state
*vstate
= env
->cur_state
;
2982 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
2983 struct bpf_reg_state
*regs
= state
->regs
, *dst_reg
;
2984 bool known
= tnum_is_const(off_reg
->var_off
);
2985 s64 smin_val
= off_reg
->smin_value
, smax_val
= off_reg
->smax_value
,
2986 smin_ptr
= ptr_reg
->smin_value
, smax_ptr
= ptr_reg
->smax_value
;
2987 u64 umin_val
= off_reg
->umin_value
, umax_val
= off_reg
->umax_value
,
2988 umin_ptr
= ptr_reg
->umin_value
, umax_ptr
= ptr_reg
->umax_value
;
2989 u8 opcode
= BPF_OP(insn
->code
);
2990 u32 dst
= insn
->dst_reg
;
2992 dst_reg
= ®s
[dst
];
2994 if ((known
&& (smin_val
!= smax_val
|| umin_val
!= umax_val
)) ||
2995 smin_val
> smax_val
|| umin_val
> umax_val
) {
2996 /* Taint dst register if offset had invalid bounds derived from
2997 * e.g. dead branches.
2999 __mark_reg_unknown(dst_reg
);
3003 if (BPF_CLASS(insn
->code
) != BPF_ALU64
) {
3004 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
3006 "R%d 32-bit pointer arithmetic prohibited\n",
3011 switch (ptr_reg
->type
) {
3012 case PTR_TO_MAP_VALUE_OR_NULL
:
3013 verbose(env
, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
3014 dst
, reg_type_str
[ptr_reg
->type
]);
3016 case CONST_PTR_TO_MAP
:
3017 case PTR_TO_PACKET_END
:
3019 case PTR_TO_SOCKET_OR_NULL
:
3020 verbose(env
, "R%d pointer arithmetic on %s prohibited\n",
3021 dst
, reg_type_str
[ptr_reg
->type
]);
3027 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
3028 * The id may be overwritten later if we create a new variable offset.
3030 dst_reg
->type
= ptr_reg
->type
;
3031 dst_reg
->id
= ptr_reg
->id
;
3033 if (!check_reg_sane_offset(env
, off_reg
, ptr_reg
->type
) ||
3034 !check_reg_sane_offset(env
, ptr_reg
, ptr_reg
->type
))
3039 /* We can take a fixed offset as long as it doesn't overflow
3040 * the s32 'off' field
3042 if (known
&& (ptr_reg
->off
+ smin_val
==
3043 (s64
)(s32
)(ptr_reg
->off
+ smin_val
))) {
3044 /* pointer += K. Accumulate it into fixed offset */
3045 dst_reg
->smin_value
= smin_ptr
;
3046 dst_reg
->smax_value
= smax_ptr
;
3047 dst_reg
->umin_value
= umin_ptr
;
3048 dst_reg
->umax_value
= umax_ptr
;
3049 dst_reg
->var_off
= ptr_reg
->var_off
;
3050 dst_reg
->off
= ptr_reg
->off
+ smin_val
;
3051 dst_reg
->raw
= ptr_reg
->raw
;
3054 /* A new variable offset is created. Note that off_reg->off
3055 * == 0, since it's a scalar.
3056 * dst_reg gets the pointer type and since some positive
3057 * integer value was added to the pointer, give it a new 'id'
3058 * if it's a PTR_TO_PACKET.
3059 * this creates a new 'base' pointer, off_reg (variable) gets
3060 * added into the variable offset, and we copy the fixed offset
3063 if (signed_add_overflows(smin_ptr
, smin_val
) ||
3064 signed_add_overflows(smax_ptr
, smax_val
)) {
3065 dst_reg
->smin_value
= S64_MIN
;
3066 dst_reg
->smax_value
= S64_MAX
;
3068 dst_reg
->smin_value
= smin_ptr
+ smin_val
;
3069 dst_reg
->smax_value
= smax_ptr
+ smax_val
;
3071 if (umin_ptr
+ umin_val
< umin_ptr
||
3072 umax_ptr
+ umax_val
< umax_ptr
) {
3073 dst_reg
->umin_value
= 0;
3074 dst_reg
->umax_value
= U64_MAX
;
3076 dst_reg
->umin_value
= umin_ptr
+ umin_val
;
3077 dst_reg
->umax_value
= umax_ptr
+ umax_val
;
3079 dst_reg
->var_off
= tnum_add(ptr_reg
->var_off
, off_reg
->var_off
);
3080 dst_reg
->off
= ptr_reg
->off
;
3081 dst_reg
->raw
= ptr_reg
->raw
;
3082 if (reg_is_pkt_pointer(ptr_reg
)) {
3083 dst_reg
->id
= ++env
->id_gen
;
3084 /* something was added to pkt_ptr, set range to zero */
3089 if (dst_reg
== off_reg
) {
3090 /* scalar -= pointer. Creates an unknown scalar */
3091 verbose(env
, "R%d tried to subtract pointer from scalar\n",
3095 /* We don't allow subtraction from FP, because (according to
3096 * test_verifier.c test "invalid fp arithmetic", JITs might not
3097 * be able to deal with it.
3099 if (ptr_reg
->type
== PTR_TO_STACK
) {
3100 verbose(env
, "R%d subtraction from stack pointer prohibited\n",
3104 if (known
&& (ptr_reg
->off
- smin_val
==
3105 (s64
)(s32
)(ptr_reg
->off
- smin_val
))) {
3106 /* pointer -= K. Subtract it from fixed offset */
3107 dst_reg
->smin_value
= smin_ptr
;
3108 dst_reg
->smax_value
= smax_ptr
;
3109 dst_reg
->umin_value
= umin_ptr
;
3110 dst_reg
->umax_value
= umax_ptr
;
3111 dst_reg
->var_off
= ptr_reg
->var_off
;
3112 dst_reg
->id
= ptr_reg
->id
;
3113 dst_reg
->off
= ptr_reg
->off
- smin_val
;
3114 dst_reg
->raw
= ptr_reg
->raw
;
3117 /* A new variable offset is created. If the subtrahend is known
3118 * nonnegative, then any reg->range we had before is still good.
3120 if (signed_sub_overflows(smin_ptr
, smax_val
) ||
3121 signed_sub_overflows(smax_ptr
, smin_val
)) {
3122 /* Overflow possible, we know nothing */
3123 dst_reg
->smin_value
= S64_MIN
;
3124 dst_reg
->smax_value
= S64_MAX
;
3126 dst_reg
->smin_value
= smin_ptr
- smax_val
;
3127 dst_reg
->smax_value
= smax_ptr
- smin_val
;
3129 if (umin_ptr
< umax_val
) {
3130 /* Overflow possible, we know nothing */
3131 dst_reg
->umin_value
= 0;
3132 dst_reg
->umax_value
= U64_MAX
;
3134 /* Cannot overflow (as long as bounds are consistent) */
3135 dst_reg
->umin_value
= umin_ptr
- umax_val
;
3136 dst_reg
->umax_value
= umax_ptr
- umin_val
;
3138 dst_reg
->var_off
= tnum_sub(ptr_reg
->var_off
, off_reg
->var_off
);
3139 dst_reg
->off
= ptr_reg
->off
;
3140 dst_reg
->raw
= ptr_reg
->raw
;
3141 if (reg_is_pkt_pointer(ptr_reg
)) {
3142 dst_reg
->id
= ++env
->id_gen
;
3143 /* something was added to pkt_ptr, set range to zero */
3151 /* bitwise ops on pointers are troublesome, prohibit. */
3152 verbose(env
, "R%d bitwise operator %s on pointer prohibited\n",
3153 dst
, bpf_alu_string
[opcode
>> 4]);
3156 /* other operators (e.g. MUL,LSH) produce non-pointer results */
3157 verbose(env
, "R%d pointer arithmetic with %s operator prohibited\n",
3158 dst
, bpf_alu_string
[opcode
>> 4]);
3162 if (!check_reg_sane_offset(env
, dst_reg
, ptr_reg
->type
))
3165 __update_reg_bounds(dst_reg
);
3166 __reg_deduce_bounds(dst_reg
);
3167 __reg_bound_offset(dst_reg
);
3171 /* WARNING: This function does calculations on 64-bit values, but the actual
3172 * execution may occur on 32-bit values. Therefore, things like bitshifts
3173 * need extra checks in the 32-bit case.
3175 static int adjust_scalar_min_max_vals(struct bpf_verifier_env
*env
,
3176 struct bpf_insn
*insn
,
3177 struct bpf_reg_state
*dst_reg
,
3178 struct bpf_reg_state src_reg
)
3180 struct bpf_reg_state
*regs
= cur_regs(env
);
3181 u8 opcode
= BPF_OP(insn
->code
);
3182 bool src_known
, dst_known
;
3183 s64 smin_val
, smax_val
;
3184 u64 umin_val
, umax_val
;
3185 u64 insn_bitness
= (BPF_CLASS(insn
->code
) == BPF_ALU64
) ? 64 : 32;
3187 if (insn_bitness
== 32) {
3188 /* Relevant for 32-bit RSH: Information can propagate towards
3189 * LSB, so it isn't sufficient to only truncate the output to
3192 coerce_reg_to_size(dst_reg
, 4);
3193 coerce_reg_to_size(&src_reg
, 4);
3196 smin_val
= src_reg
.smin_value
;
3197 smax_val
= src_reg
.smax_value
;
3198 umin_val
= src_reg
.umin_value
;
3199 umax_val
= src_reg
.umax_value
;
3200 src_known
= tnum_is_const(src_reg
.var_off
);
3201 dst_known
= tnum_is_const(dst_reg
->var_off
);
3203 if ((src_known
&& (smin_val
!= smax_val
|| umin_val
!= umax_val
)) ||
3204 smin_val
> smax_val
|| umin_val
> umax_val
) {
3205 /* Taint dst register if offset had invalid bounds derived from
3206 * e.g. dead branches.
3208 __mark_reg_unknown(dst_reg
);
3213 opcode
!= BPF_ADD
&& opcode
!= BPF_SUB
&& opcode
!= BPF_AND
) {
3214 __mark_reg_unknown(dst_reg
);
3220 if (signed_add_overflows(dst_reg
->smin_value
, smin_val
) ||
3221 signed_add_overflows(dst_reg
->smax_value
, smax_val
)) {
3222 dst_reg
->smin_value
= S64_MIN
;
3223 dst_reg
->smax_value
= S64_MAX
;
3225 dst_reg
->smin_value
+= smin_val
;
3226 dst_reg
->smax_value
+= smax_val
;
3228 if (dst_reg
->umin_value
+ umin_val
< umin_val
||
3229 dst_reg
->umax_value
+ umax_val
< umax_val
) {
3230 dst_reg
->umin_value
= 0;
3231 dst_reg
->umax_value
= U64_MAX
;
3233 dst_reg
->umin_value
+= umin_val
;
3234 dst_reg
->umax_value
+= umax_val
;
3236 dst_reg
->var_off
= tnum_add(dst_reg
->var_off
, src_reg
.var_off
);
3239 if (signed_sub_overflows(dst_reg
->smin_value
, smax_val
) ||
3240 signed_sub_overflows(dst_reg
->smax_value
, smin_val
)) {
3241 /* Overflow possible, we know nothing */
3242 dst_reg
->smin_value
= S64_MIN
;
3243 dst_reg
->smax_value
= S64_MAX
;
3245 dst_reg
->smin_value
-= smax_val
;
3246 dst_reg
->smax_value
-= smin_val
;
3248 if (dst_reg
->umin_value
< umax_val
) {
3249 /* Overflow possible, we know nothing */
3250 dst_reg
->umin_value
= 0;
3251 dst_reg
->umax_value
= U64_MAX
;
3253 /* Cannot overflow (as long as bounds are consistent) */
3254 dst_reg
->umin_value
-= umax_val
;
3255 dst_reg
->umax_value
-= umin_val
;
3257 dst_reg
->var_off
= tnum_sub(dst_reg
->var_off
, src_reg
.var_off
);
3260 dst_reg
->var_off
= tnum_mul(dst_reg
->var_off
, src_reg
.var_off
);
3261 if (smin_val
< 0 || dst_reg
->smin_value
< 0) {
3262 /* Ain't nobody got time to multiply that sign */
3263 __mark_reg_unbounded(dst_reg
);
3264 __update_reg_bounds(dst_reg
);
3267 /* Both values are positive, so we can work with unsigned and
3268 * copy the result to signed (unless it exceeds S64_MAX).
3270 if (umax_val
> U32_MAX
|| dst_reg
->umax_value
> U32_MAX
) {
3271 /* Potential overflow, we know nothing */
3272 __mark_reg_unbounded(dst_reg
);
3273 /* (except what we can learn from the var_off) */
3274 __update_reg_bounds(dst_reg
);
3277 dst_reg
->umin_value
*= umin_val
;
3278 dst_reg
->umax_value
*= umax_val
;
3279 if (dst_reg
->umax_value
> S64_MAX
) {
3280 /* Overflow possible, we know nothing */
3281 dst_reg
->smin_value
= S64_MIN
;
3282 dst_reg
->smax_value
= S64_MAX
;
3284 dst_reg
->smin_value
= dst_reg
->umin_value
;
3285 dst_reg
->smax_value
= dst_reg
->umax_value
;
3289 if (src_known
&& dst_known
) {
3290 __mark_reg_known(dst_reg
, dst_reg
->var_off
.value
&
3291 src_reg
.var_off
.value
);
3294 /* We get our minimum from the var_off, since that's inherently
3295 * bitwise. Our maximum is the minimum of the operands' maxima.
3297 dst_reg
->var_off
= tnum_and(dst_reg
->var_off
, src_reg
.var_off
);
3298 dst_reg
->umin_value
= dst_reg
->var_off
.value
;
3299 dst_reg
->umax_value
= min(dst_reg
->umax_value
, umax_val
);
3300 if (dst_reg
->smin_value
< 0 || smin_val
< 0) {
3301 /* Lose signed bounds when ANDing negative numbers,
3302 * ain't nobody got time for that.
3304 dst_reg
->smin_value
= S64_MIN
;
3305 dst_reg
->smax_value
= S64_MAX
;
3307 /* ANDing two positives gives a positive, so safe to
3308 * cast result into s64.
3310 dst_reg
->smin_value
= dst_reg
->umin_value
;
3311 dst_reg
->smax_value
= dst_reg
->umax_value
;
3313 /* We may learn something more from the var_off */
3314 __update_reg_bounds(dst_reg
);
3317 if (src_known
&& dst_known
) {
3318 __mark_reg_known(dst_reg
, dst_reg
->var_off
.value
|
3319 src_reg
.var_off
.value
);
3322 /* We get our maximum from the var_off, and our minimum is the
3323 * maximum of the operands' minima
3325 dst_reg
->var_off
= tnum_or(dst_reg
->var_off
, src_reg
.var_off
);
3326 dst_reg
->umin_value
= max(dst_reg
->umin_value
, umin_val
);
3327 dst_reg
->umax_value
= dst_reg
->var_off
.value
|
3328 dst_reg
->var_off
.mask
;
3329 if (dst_reg
->smin_value
< 0 || smin_val
< 0) {
3330 /* Lose signed bounds when ORing negative numbers,
3331 * ain't nobody got time for that.
3333 dst_reg
->smin_value
= S64_MIN
;
3334 dst_reg
->smax_value
= S64_MAX
;
3336 /* ORing two positives gives a positive, so safe to
3337 * cast result into s64.
3339 dst_reg
->smin_value
= dst_reg
->umin_value
;
3340 dst_reg
->smax_value
= dst_reg
->umax_value
;
3342 /* We may learn something more from the var_off */
3343 __update_reg_bounds(dst_reg
);
3346 if (umax_val
>= insn_bitness
) {
3347 /* Shifts greater than 31 or 63 are undefined.
3348 * This includes shifts by a negative number.
3350 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3353 /* We lose all sign bit information (except what we can pick
3356 dst_reg
->smin_value
= S64_MIN
;
3357 dst_reg
->smax_value
= S64_MAX
;
3358 /* If we might shift our top bit out, then we know nothing */
3359 if (dst_reg
->umax_value
> 1ULL << (63 - umax_val
)) {
3360 dst_reg
->umin_value
= 0;
3361 dst_reg
->umax_value
= U64_MAX
;
3363 dst_reg
->umin_value
<<= umin_val
;
3364 dst_reg
->umax_value
<<= umax_val
;
3366 dst_reg
->var_off
= tnum_lshift(dst_reg
->var_off
, umin_val
);
3367 /* We may learn something more from the var_off */
3368 __update_reg_bounds(dst_reg
);
3371 if (umax_val
>= insn_bitness
) {
3372 /* Shifts greater than 31 or 63 are undefined.
3373 * This includes shifts by a negative number.
3375 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3378 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
3379 * be negative, then either:
3380 * 1) src_reg might be zero, so the sign bit of the result is
3381 * unknown, so we lose our signed bounds
3382 * 2) it's known negative, thus the unsigned bounds capture the
3384 * 3) the signed bounds cross zero, so they tell us nothing
3386 * If the value in dst_reg is known nonnegative, then again the
3387 * unsigned bounts capture the signed bounds.
3388 * Thus, in all cases it suffices to blow away our signed bounds
3389 * and rely on inferring new ones from the unsigned bounds and
3390 * var_off of the result.
3392 dst_reg
->smin_value
= S64_MIN
;
3393 dst_reg
->smax_value
= S64_MAX
;
3394 dst_reg
->var_off
= tnum_rshift(dst_reg
->var_off
, umin_val
);
3395 dst_reg
->umin_value
>>= umax_val
;
3396 dst_reg
->umax_value
>>= umin_val
;
3397 /* We may learn something more from the var_off */
3398 __update_reg_bounds(dst_reg
);
3401 if (umax_val
>= insn_bitness
) {
3402 /* Shifts greater than 31 or 63 are undefined.
3403 * This includes shifts by a negative number.
3405 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3409 /* Upon reaching here, src_known is true and
3410 * umax_val is equal to umin_val.
3412 dst_reg
->smin_value
>>= umin_val
;
3413 dst_reg
->smax_value
>>= umin_val
;
3414 dst_reg
->var_off
= tnum_arshift(dst_reg
->var_off
, umin_val
);
3416 /* blow away the dst_reg umin_value/umax_value and rely on
3417 * dst_reg var_off to refine the result.
3419 dst_reg
->umin_value
= 0;
3420 dst_reg
->umax_value
= U64_MAX
;
3421 __update_reg_bounds(dst_reg
);
3424 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3428 if (BPF_CLASS(insn
->code
) != BPF_ALU64
) {
3429 /* 32-bit ALU ops are (32,32)->32 */
3430 coerce_reg_to_size(dst_reg
, 4);
3433 __reg_deduce_bounds(dst_reg
);
3434 __reg_bound_offset(dst_reg
);
3438 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
3441 static int adjust_reg_min_max_vals(struct bpf_verifier_env
*env
,
3442 struct bpf_insn
*insn
)
3444 struct bpf_verifier_state
*vstate
= env
->cur_state
;
3445 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
3446 struct bpf_reg_state
*regs
= state
->regs
, *dst_reg
, *src_reg
;
3447 struct bpf_reg_state
*ptr_reg
= NULL
, off_reg
= {0};
3448 u8 opcode
= BPF_OP(insn
->code
);
3450 dst_reg
= ®s
[insn
->dst_reg
];
3452 if (dst_reg
->type
!= SCALAR_VALUE
)
3454 if (BPF_SRC(insn
->code
) == BPF_X
) {
3455 src_reg
= ®s
[insn
->src_reg
];
3456 if (src_reg
->type
!= SCALAR_VALUE
) {
3457 if (dst_reg
->type
!= SCALAR_VALUE
) {
3458 /* Combining two pointers by any ALU op yields
3459 * an arbitrary scalar. Disallow all math except
3460 * pointer subtraction
3462 if (opcode
== BPF_SUB
&& env
->allow_ptr_leaks
) {
3463 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3466 verbose(env
, "R%d pointer %s pointer prohibited\n",
3468 bpf_alu_string
[opcode
>> 4]);
3471 /* scalar += pointer
3472 * This is legal, but we have to reverse our
3473 * src/dest handling in computing the range
3475 return adjust_ptr_min_max_vals(env
, insn
,
3478 } else if (ptr_reg
) {
3479 /* pointer += scalar */
3480 return adjust_ptr_min_max_vals(env
, insn
,
3484 /* Pretend the src is a reg with a known value, since we only
3485 * need to be able to read from this state.
3487 off_reg
.type
= SCALAR_VALUE
;
3488 __mark_reg_known(&off_reg
, insn
->imm
);
3490 if (ptr_reg
) /* pointer += K */
3491 return adjust_ptr_min_max_vals(env
, insn
,
3495 /* Got here implies adding two SCALAR_VALUEs */
3496 if (WARN_ON_ONCE(ptr_reg
)) {
3497 print_verifier_state(env
, state
);
3498 verbose(env
, "verifier internal error: unexpected ptr_reg\n");
3501 if (WARN_ON(!src_reg
)) {
3502 print_verifier_state(env
, state
);
3503 verbose(env
, "verifier internal error: no src_reg\n");
3506 return adjust_scalar_min_max_vals(env
, insn
, dst_reg
, *src_reg
);
3509 /* check validity of 32-bit and 64-bit arithmetic operations */
3510 static int check_alu_op(struct bpf_verifier_env
*env
, struct bpf_insn
*insn
)
3512 struct bpf_reg_state
*regs
= cur_regs(env
);
3513 u8 opcode
= BPF_OP(insn
->code
);
3516 if (opcode
== BPF_END
|| opcode
== BPF_NEG
) {
3517 if (opcode
== BPF_NEG
) {
3518 if (BPF_SRC(insn
->code
) != 0 ||
3519 insn
->src_reg
!= BPF_REG_0
||
3520 insn
->off
!= 0 || insn
->imm
!= 0) {
3521 verbose(env
, "BPF_NEG uses reserved fields\n");
3525 if (insn
->src_reg
!= BPF_REG_0
|| insn
->off
!= 0 ||
3526 (insn
->imm
!= 16 && insn
->imm
!= 32 && insn
->imm
!= 64) ||
3527 BPF_CLASS(insn
->code
) == BPF_ALU64
) {
3528 verbose(env
, "BPF_END uses reserved fields\n");
3533 /* check src operand */
3534 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
3538 if (is_pointer_value(env
, insn
->dst_reg
)) {
3539 verbose(env
, "R%d pointer arithmetic prohibited\n",
3544 /* check dest operand */
3545 err
= check_reg_arg(env
, insn
->dst_reg
, DST_OP
);
3549 } else if (opcode
== BPF_MOV
) {
3551 if (BPF_SRC(insn
->code
) == BPF_X
) {
3552 if (insn
->imm
!= 0 || insn
->off
!= 0) {
3553 verbose(env
, "BPF_MOV uses reserved fields\n");
3557 /* check src operand */
3558 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
3562 if (insn
->src_reg
!= BPF_REG_0
|| insn
->off
!= 0) {
3563 verbose(env
, "BPF_MOV uses reserved fields\n");
3568 /* check dest operand, mark as required later */
3569 err
= check_reg_arg(env
, insn
->dst_reg
, DST_OP_NO_MARK
);
3573 if (BPF_SRC(insn
->code
) == BPF_X
) {
3574 if (BPF_CLASS(insn
->code
) == BPF_ALU64
) {
3576 * copy register state to dest reg
3578 regs
[insn
->dst_reg
] = regs
[insn
->src_reg
];
3579 regs
[insn
->dst_reg
].live
|= REG_LIVE_WRITTEN
;
3582 if (is_pointer_value(env
, insn
->src_reg
)) {
3584 "R%d partial copy of pointer\n",
3588 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3589 coerce_reg_to_size(®s
[insn
->dst_reg
], 4);
3593 * remember the value we stored into this reg
3595 /* clear any state __mark_reg_known doesn't set */
3596 mark_reg_unknown(env
, regs
, insn
->dst_reg
);
3597 regs
[insn
->dst_reg
].type
= SCALAR_VALUE
;
3598 if (BPF_CLASS(insn
->code
) == BPF_ALU64
) {
3599 __mark_reg_known(regs
+ insn
->dst_reg
,
3602 __mark_reg_known(regs
+ insn
->dst_reg
,
3607 } else if (opcode
> BPF_END
) {
3608 verbose(env
, "invalid BPF_ALU opcode %x\n", opcode
);
3611 } else { /* all other ALU ops: and, sub, xor, add, ... */
3613 if (BPF_SRC(insn
->code
) == BPF_X
) {
3614 if (insn
->imm
!= 0 || insn
->off
!= 0) {
3615 verbose(env
, "BPF_ALU uses reserved fields\n");
3618 /* check src1 operand */
3619 err
= check_reg_arg(env
, insn
->src_reg
, SRC_OP
);
3623 if (insn
->src_reg
!= BPF_REG_0
|| insn
->off
!= 0) {
3624 verbose(env
, "BPF_ALU uses reserved fields\n");
3629 /* check src2 operand */
3630 err
= check_reg_arg(env
, insn
->dst_reg
, SRC_OP
);
3634 if ((opcode
== BPF_MOD
|| opcode
== BPF_DIV
) &&
3635 BPF_SRC(insn
->code
) == BPF_K
&& insn
->imm
== 0) {
3636 verbose(env
, "div by zero\n");
3640 if (opcode
== BPF_ARSH
&& BPF_CLASS(insn
->code
) != BPF_ALU64
) {
3641 verbose(env
, "BPF_ARSH not supported for 32 bit ALU\n");
3645 if ((opcode
== BPF_LSH
|| opcode
== BPF_RSH
||
3646 opcode
== BPF_ARSH
) && BPF_SRC(insn
->code
) == BPF_K
) {
3647 int size
= BPF_CLASS(insn
->code
) == BPF_ALU64
? 64 : 32;
3649 if (insn
->imm
< 0 || insn
->imm
>= size
) {
3650 verbose(env
, "invalid shift %d\n", insn
->imm
);
3655 /* check dest operand */
3656 err
= check_reg_arg(env
, insn
->dst_reg
, DST_OP_NO_MARK
);
3660 return adjust_reg_min_max_vals(env
, insn
);
3666 static void find_good_pkt_pointers(struct bpf_verifier_state
*vstate
,
3667 struct bpf_reg_state
*dst_reg
,
3668 enum bpf_reg_type type
,
3669 bool range_right_open
)
3671 struct bpf_func_state
*state
= vstate
->frame
[vstate
->curframe
];
3672 struct bpf_reg_state
*regs
= state
->regs
, *reg
;
3676 if (dst_reg
->off
< 0 ||
3677 (dst_reg
->off
== 0 && range_right_open
))
3678 /* This doesn't give us any range */
3681 if (dst_reg
->umax_value
> MAX_PACKET_OFF
||
3682 dst_reg
->umax_value
+ dst_reg
->off
> MAX_PACKET_OFF
)
3683 /* Risk of overflow. For instance, ptr + (1<<63) may be less
3684 * than pkt_end, but that's because it's also less than pkt.
3688 new_range
= dst_reg
->off
;
3689 if (range_right_open
)
3692 /* Examples for register markings:
3694 * pkt_data in dst register:
3698 * if (r2 > pkt_end) goto <handle exception>
3703 * if (r2 < pkt_end) goto <access okay>
3704 * <handle exception>
3707 * r2 == dst_reg, pkt_end == src_reg
3708 * r2=pkt(id=n,off=8,r=0)
3709 * r3=pkt(id=n,off=0,r=0)
3711 * pkt_data in src register:
3715 * if (pkt_end >= r2) goto <access okay>
3716 * <handle exception>
3720 * if (pkt_end <= r2) goto <handle exception>
3724 * pkt_end == dst_reg, r2 == src_reg
3725 * r2=pkt(id=n,off=8,r=0)
3726 * r3=pkt(id=n,off=0,r=0)
3728 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
3729 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
3730 * and [r3, r3 + 8-1) respectively is safe to access depending on
3734 /* If our ids match, then we must have the same max_value. And we
3735 * don't care about the other reg's fixed offset, since if it's too big
3736 * the range won't allow anything.
3737 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
3739 for (i
= 0; i
< MAX_BPF_REG
; i
++)
3740 if (regs
[i
].type
== type
&& regs
[i
].id
== dst_reg
->id
)
3741 /* keep the maximum range already checked */
3742 regs
[i
].range
= max(regs
[i
].range
, new_range
);
3744 for (j
= 0; j
<= vstate
->curframe
; j
++) {
3745 state
= vstate
->frame
[j
];
3746 bpf_for_each_spilled_reg(i
, state
, reg
) {
3749 if (reg
->type
== type
&& reg
->id
== dst_reg
->id
)
3750 reg
->range
= max(reg
->range
, new_range
);
3755 /* compute branch direction of the expression "if (reg opcode val) goto target;"
3757 * 1 - branch will be taken and "goto target" will be executed
3758 * 0 - branch will not be taken and fall-through to next insn
3759 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
3761 static int is_branch_taken(struct bpf_reg_state
*reg
, u64 val
, u8 opcode
)
3763 if (__is_pointer_value(false, reg
))
3768 if (tnum_is_const(reg
->var_off
))
3769 return !!tnum_equals_const(reg
->var_off
, val
);
3772 if (tnum_is_const(reg
->var_off
))
3773 return !tnum_equals_const(reg
->var_off
, val
);
3776 if (reg
->umin_value
> val
)
3778 else if (reg
->umax_value
<= val
)
3782 if (reg
->smin_value
> (s64
)val
)
3784 else if (reg
->smax_value
< (s64
)val
)
3788 if (reg
->umax_value
< val
)
3790 else if (reg
->umin_value
>= val
)
3794 if (reg
->smax_value
< (s64
)val
)
3796 else if (reg
->smin_value
>= (s64
)val
)
3800 if (reg
->umin_value
>= val
)
3802 else if (reg
->umax_value
< val
)
3806 if (reg
->smin_value
>= (s64
)val
)
3808 else if (reg
->smax_value
< (s64
)val
)
3812 if (reg
->umax_value
<= val
)
3814 else if (reg
->umin_value
> val
)
3818 if (reg
->smax_value
<= (s64
)val
)
3820 else if (reg
->smin_value
> (s64
)val
)
3828 /* Adjusts the register min/max values in the case that the dst_reg is the
3829 * variable register that we are working on, and src_reg is a constant or we're
3830 * simply doing a BPF_K check.
3831 * In JEQ/JNE cases we also adjust the var_off values.
3833 static void reg_set_min_max(struct bpf_reg_state
*true_reg
,
3834 struct bpf_reg_state
*false_reg
, u64 val
,
3837 /* If the dst_reg is a pointer, we can't learn anything about its
3838 * variable offset from the compare (unless src_reg were a pointer into
3839 * the same object, but we don't bother with that.
3840 * Since false_reg and true_reg have the same type by construction, we
3841 * only need to check one of them for pointerness.
3843 if (__is_pointer_value(false, false_reg
))
3848 /* If this is false then we know nothing Jon Snow, but if it is
3849 * true then we know for sure.
3851 __mark_reg_known(true_reg
, val
);
3854 /* If this is true we know nothing Jon Snow, but if it is false
3855 * we know the value for sure;
3857 __mark_reg_known(false_reg
, val
);
3860 false_reg
->umax_value
= min(false_reg
->umax_value
, val
);
3861 true_reg
->umin_value
= max(true_reg
->umin_value
, val
+ 1);
3864 false_reg
->smax_value
= min_t(s64
, false_reg
->smax_value
, val
);
3865 true_reg
->smin_value
= max_t(s64
, true_reg
->smin_value
, val
+ 1);
3868 false_reg
->umin_value
= max(false_reg
->umin_value
, val
);
3869 true_reg
->umax_value
= min(true_reg
->umax_value
, val
- 1);
3872 false_reg
->smin_value
= max_t(s64
, false_reg
->smin_value
, val
);
3873 true_reg
->smax_value
= min_t(s64
, true_reg
->smax_value
, val
- 1);
3876 false_reg
->umax_value
= min(false_reg
->umax_value
, val
- 1);
3877 true_reg
->umin_value
= max(true_reg
->umin_value
, val
);
3880 false_reg
->smax_value
= min_t(s64
, false_reg
->smax_value
, val
- 1);
3881 true_reg
->smin_value
= max_t(s64
, true_reg
->smin_value
, val
);
3884 false_reg
->umin_value
= max(false_reg
->umin_value
, val
+ 1);
3885 true_reg
->umax_value
= min(true_reg
->umax_value
, val
);
3888 false_reg
->smin_value
= max_t(s64
, false_reg
->smin_value
, val
+ 1);
3889 true_reg
->smax_value
= min_t(s64
, true_reg
->smax_value
, val
);
3895 __reg_deduce_bounds(false_reg
);
3896 __reg_deduce_bounds(true_reg
);
3897 /* We might have learned some bits from the bounds. */
3898 __reg_bound_offset(false_reg
);
3899 __reg_bound_offset(true_reg
);
3900 /* Intersecting with the old var_off might have improved our bounds
3901 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3902 * then new var_off is (0; 0x7f...fc) which improves our umax.
3904 __update_reg_bounds(false_reg
);
3905 __update_reg_bounds(true_reg
);
3908 /* Same as above, but for the case that dst_reg holds a constant and src_reg is
3911 static void reg_set_min_max_inv(struct bpf_reg_state
*true_reg
,
3912 struct bpf_reg_state
*false_reg
, u64 val
,
3915 if (__is_pointer_value(false, false_reg
))
3920 /* If this is false then we know nothing Jon Snow, but if it is
3921 * true then we know for sure.
3923 __mark_reg_known(true_reg
, val
);
3926 /* If this is true we know nothing Jon Snow, but if it is false
3927 * we know the value for sure;
3929 __mark_reg_known(false_reg
, val
);
3932 true_reg
->umax_value
= min(true_reg
->umax_value
, val
- 1);
3933 false_reg
->umin_value
= max(false_reg
->umin_value
, val
);
3936 true_reg
->smax_value
= min_t(s64
, true_reg
->smax_value
, val
- 1);
3937 false_reg
->smin_value
= max_t(s64
, false_reg
->smin_value
, val
);
3940 true_reg
->umin_value
= max(true_reg
->umin_value
, val
+ 1);
3941 false_reg
->umax_value
= min(false_reg
->umax_value
, val
);
3944 true_reg
->smin_value
= max_t(s64
, true_reg
->smin_value
, val
+ 1);
3945 false_reg
->smax_value
= min_t(s64
, false_reg
->smax_value
, val
);
3948 true_reg
->umax_value
= min(true_reg
->umax_value
, val
);
3949 false_reg
->umin_value
= max(false_reg
->umin_value
, val
+ 1);
3952 true_reg
->smax_value
= min_t(s64
, true_reg
->smax_value
, val
);
3953 false_reg
->smin_value
= max_t(s64
, false_reg
->smin_value
, val
+ 1);
3956 true_reg
->umin_value
= max(true_reg
->umin_value
, val
);
3957 false_reg
->umax_value
= min(false_reg
->umax_value
, val
- 1);
3960 true_reg
->smin_value
= max_t(s64
, true_reg
->smin_value
, val
);
3961 false_reg
->smax_value
= min_t(s64
, false_reg
->smax_value
, val
- 1);
3967 __reg_deduce_bounds(false_reg
);
3968 __reg_deduce_bounds(true_reg
);
3969 /* We might have learned some bits from the bounds. */
3970 __reg_bound_offset(false_reg
);
3971 __reg_bound_offset(true_reg
);
3972 /* Intersecting with the old var_off might have improved our bounds
3973 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
3974 * then new var_off is (0; 0x7f...fc) which improves our umax.
3976 __update_reg_bounds(false_reg
);
3977 __update_reg_bounds(true_reg
);
/* Regs are known to be equal, so intersect their min/max/var_off */
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
				  struct bpf_reg_state *dst_reg)
{
	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
							dst_reg->umin_value);
	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
							dst_reg->umax_value);
	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
							dst_reg->smin_value);
	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
							dst_reg->smax_value);
	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
							     dst_reg->var_off);
	/* We might have learned new bounds from the var_off. */
	__update_reg_bounds(src_reg);
	__update_reg_bounds(dst_reg);
	/* We might have learned something about the sign bit. */
	__reg_deduce_bounds(src_reg);
	__reg_deduce_bounds(dst_reg);
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(src_reg);
	__reg_bound_offset(dst_reg);
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__update_reg_bounds(src_reg);
	__update_reg_bounds(dst_reg);
}
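/* Illustrative sketch (not verifier code): once two scalars are known equal
 * after a BPF_JEQ, each side may adopt the tighter of the two ranges, which
 * is what the min/max assignments and tnum_intersect() above implement, e.g.
 * src in [10, 200] and dst in [0, 100] both become [10, 100]:
 *
 *	static void combine(unsigned long long *a_min, unsigned long long *a_max,
 *			    unsigned long long *b_min, unsigned long long *b_max)
 *	{
 *		unsigned long long lo = *a_min > *b_min ? *a_min : *b_min;
 *		unsigned long long hi = *a_max < *b_max ? *a_max : *b_max;
 *
 *		*a_min = *b_min = lo;
 *		*a_max = *b_max = hi;
 *	}
 *
 * The var_off (known-bits) tnum is intersected the same way, and bounds and
 * bits are then re-derived from each other by the helper calls that follow.
 */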
static void reg_combine_min_max(struct bpf_reg_state *true_src,
				struct bpf_reg_state *true_dst,
				struct bpf_reg_state *false_src,
				struct bpf_reg_state *false_dst,
				u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		__reg_combine_min_max(true_src, true_dst);
		break;
	case BPF_JNE:
		__reg_combine_min_max(false_src, false_dst);
		break;
	}
}
static void mark_ptr_or_null_reg(struct bpf_func_state *state,
				 struct bpf_reg_state *reg, u32 id,
				 bool is_null)
{
	if (reg_type_may_be_null(reg->type) && reg->id == id) {
		/* Old offset (both fixed and variable parts) should
		 * have been known-zero, because we don't allow pointer
		 * arithmetic on pointers that might be NULL.
		 */
		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
				 !tnum_equals_const(reg->var_off, 0) ||
				 reg->off)) {
			__mark_reg_known_zero(reg);
			reg->off = 0;
		}
		if (is_null) {
			reg->type = SCALAR_VALUE;
		} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
			if (reg->map_ptr->inner_map_meta) {
				reg->type = CONST_PTR_TO_MAP;
				reg->map_ptr = reg->map_ptr->inner_map_meta;
			} else {
				reg->type = PTR_TO_MAP_VALUE;
			}
		} else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
			reg->type = PTR_TO_SOCKET;
		}
		if (is_null || !reg_is_refcounted(reg)) {
			/* We don't need id from this point onwards anymore,
			 * thus we should better reset it, so that state
			 * pruning has chances to take effect.
			 */
			reg->id = 0;
		}
	}
}
/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
 */
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
				  bool is_null)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg, *regs = state->regs;
	u32 id = regs[regno].id;
	int i, j;

	if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
		__release_reference_state(state, id);

	for (i = 0; i < MAX_BPF_REG; i++)
		mark_ptr_or_null_reg(state, &regs[i], id, is_null);

	for (j = 0; j <= vstate->curframe; j++) {
		state = vstate->frame[j];
		bpf_for_each_spilled_reg(i, state, reg) {
			if (!reg)
				continue;
			mark_ptr_or_null_reg(state, reg, id, is_null);
		}
	}
}
static bool try_match_pkt_pointers(const struct bpf_insn *insn,
				   struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   struct bpf_verifier_state *this_branch,
				   struct bpf_verifier_state *other_branch)
{
	if (BPF_SRC(insn->code) != BPF_X)
		return false;

	switch (BPF_OP(insn->code)) {
	case BPF_JGT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, true);
		} else {
			return false;
		}
		break;
	case BPF_JLT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, false);
		} else {
			return false;
		}
		break;
	case BPF_JGE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, false);
		} else {
			return false;
		}
		break;
	case BPF_JLE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, true);
		} else {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}
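/* The branch shapes recognized above are what the usual bounds check in a
 * direct packet access program compiles into.  A typical (hypothetical) XDP
 * snippet:
 *
 *	void *data     = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *	struct ethhdr *eth = data;
 *
 *	if (data + sizeof(*eth) > data_end)
 *		return XDP_DROP;
 *	// in the fall-through branch the verifier has learned
 *	// pkt_data' (data + 14) <= pkt_end, so eth fields become readable
 *
 * The compiler emits that test as a JGT/JLE between a derived packet pointer
 * and pkt_end, and find_good_pkt_pointers() then records the proven 'range'
 * on every register that shares the same packet id.
 */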
static int check_cond_jmp_op(struct bpf_verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct bpf_verifier_state *this_branch = env->cur_state;
	struct bpf_verifier_state *other_branch;
	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
	struct bpf_reg_state *dst_reg, *other_branch_regs;
	u8 opcode = BPF_OP(insn->code);
	int err;

	if (opcode > BPF_JSLE) {
		verbose(env, "invalid BPF_JMP opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose(env, "BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose(env, "R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose(env, "BPF_JMP uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];

	if (BPF_SRC(insn->code) == BPF_K) {
		int pred = is_branch_taken(dst_reg, insn->imm, opcode);

		if (pred == 1) {
			/* only follow the goto, ignore fall-through */
			*insn_idx += insn->off;
			return 0;
		} else if (pred == 0) {
			/* only follow fall-through branch, since
			 * that's where the program will go
			 */
			return 0;
		}
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
	if (!other_branch)
		return -EFAULT;
	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;

	/* detect if we are comparing against a constant value so we can adjust
	 * our min/max values for our dst register.
	 * this is only legit if both are scalars (or pointers to the same
	 * object, I suppose, but we don't support that right now), because
	 * otherwise the different base pointers mean the offsets aren't
	 * comparable.
	 */
	if (BPF_SRC(insn->code) == BPF_X) {
		if (dst_reg->type == SCALAR_VALUE &&
		    regs[insn->src_reg].type == SCALAR_VALUE) {
			if (tnum_is_const(regs[insn->src_reg].var_off))
				reg_set_min_max(&other_branch_regs[insn->dst_reg],
						dst_reg, regs[insn->src_reg].var_off.value,
						opcode);
			else if (tnum_is_const(dst_reg->var_off))
				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
						    &regs[insn->src_reg],
						    dst_reg->var_off.value, opcode);
			else if (opcode == BPF_JEQ || opcode == BPF_JNE)
				/* Comparing for equality, we can combine knowledge */
				reg_combine_min_max(&other_branch_regs[insn->src_reg],
						    &other_branch_regs[insn->dst_reg],
						    &regs[insn->src_reg],
						    &regs[insn->dst_reg], opcode);
		}
	} else if (dst_reg->type == SCALAR_VALUE) {
		reg_set_min_max(&other_branch_regs[insn->dst_reg],
				dst_reg, insn->imm, opcode);
	}

	/* detect if R == 0 where R is returned from bpf_map_lookup_elem() */
	if (BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    reg_type_may_be_null(dst_reg->type)) {
		/* Mark all identical registers in each branch as either
		 * safe or unknown depending R == 0 or R != 0 conditional.
		 */
		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
				      opcode == BPF_JNE);
		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
				      opcode == BPF_JEQ);
	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
					   this_branch, other_branch) &&
		   is_pointer_value(env, insn->dst_reg)) {
		verbose(env, "R%d pointer comparison prohibited\n",
			insn->dst_reg);
		return -EACCES;
	}
	if (env->log.level)
		print_verifier_state(env, this_branch->frame[this_branch->curframe]);
	return 0;
}
/* return the map pointer stored inside BPF_LD_IMM64 instruction */
static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
{
	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;

	return (struct bpf_map *) (unsigned long) imm64;
}
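/* BPF_LD_IMM64 is a two-slot instruction: the low 32 bits of the constant
 * live in insn[0].imm and the high 32 bits in insn[1].imm.  A minimal
 * standalone illustration of the reassembly done above (sketch only):
 *
 *	unsigned long long imm64_of(unsigned int lo, unsigned int hi)
 *	{
 *		return (unsigned long long)lo | ((unsigned long long)hi << 32);
 *	}
 *
 * For pseudo map loads, replace_map_fd_with_map_ptr() has already stored a
 * kernel pointer split across the two halves, so casting imm64 back to a
 * 'struct bpf_map *' here recovers that pointer.
 */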
/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose(env, "invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(env, insn->dst_reg, DST_OP);
	if (err)
		return err;

	if (insn->src_reg == 0) {
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

		regs[insn->dst_reg].type = SCALAR_VALUE;
		__mark_reg_known(&regs[insn->dst_reg], imm);
		return 0;
	}

	/* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */
	BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD);

	regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
	regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn);
	return 0;
}
static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}
/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	u8 mode = BPF_MODE(insn->code);
	int i, err;

	if (!may_access_skb(env->prog->type)) {
		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (!env->ops->gen_ld_abs) {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	if (env->subprog_cnt > 1) {
		/* when program has LD_ABS insn JITs and interpreter assume
		 * that r1 == ctx == skb which is not the case for callees
		 * that can have arbitrary arguments. It's problematic
		 * for main prog as well since JITs would need to analyze
		 * all functions in order to make proper register save/restore
		 * decisions in the main prog. Hence disallow LD_ABS with calls
		 */
		verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
	if (err)
		return err;

	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
	 * gen_ld_abs() may terminate the program at runtime, leading to
	 * reference leak.
	 */
	err = check_reference_leak(env);
	if (err) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
		return err;
	}

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
		verbose(env,
			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet.
	 * Already marked as written above.
	 */
	mark_reg_unknown(env, regs, BPF_REG_0);
	return 0;
}
static int check_return_code(struct bpf_verifier_env *env)
{
	struct bpf_reg_state *reg;
	struct tnum range = tnum_range(0, 1);

	switch (env->prog->type) {
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
		break;
	default:
		return 0;
	}

	reg = cur_regs(env) + BPF_REG_0;
	if (reg->type != SCALAR_VALUE) {
		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
			reg_type_str[reg->type]);
		return -EINVAL;
	}

	if (!tnum_in(range, reg->var_off)) {
		verbose(env, "At program exit the register R0 ");
		if (!tnum_is_unknown(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env, "has value %s", tn_buf);
		} else {
			verbose(env, "has unknown scalar value");
		}
		verbose(env, " should have been 0 or 1\n");
		return -EINVAL;
	}
	return 0;
}
/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.pop()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};

#define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L)

static int *insn_stack;	/* stack of insns to process */
static int cur_stack;	/* current stack index */
static int *insn_state;
/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
{
	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
		return 0;

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
		return 0;

	if (w < 0 || w >= env->prog->len) {
		verbose(env, "jump out of range from insn %d to %d\n", t, w);
		return -EINVAL;
	}

	if (e == BRANCH)
		/* mark branch target for state pruning */
		env->explored_states[w] = STATE_LIST_MARK;

	if (insn_state[w] == 0) {
		/* tree-edge */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (cur_stack >= env->prog->len)
			return -E2BIG;
		insn_stack[cur_stack++] = w;
		return 1;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		verbose(env, "back-edge from insn %d to %d\n", t, w);
		return -EINVAL;
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;
	} else {
		verbose(env, "insn state internal bug\n");
		return -EFAULT;
	}
	return 0;
}
/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct bpf_verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int ret = 0;
	int i, t;

	ret = check_subprogs(env);
	if (ret < 0)
		return ret;

	insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	cur_stack = 1;

peek_stack:
	if (cur_stack == 0)
		goto check_state;
	t = insn_stack[cur_stack - 1];

	if (BPF_CLASS(insns[t].code) == BPF_JMP) {
		u8 opcode = BPF_OP(insns[t].code);

		if (opcode == BPF_EXIT) {
			goto mark_explored;
		} else if (opcode == BPF_CALL) {
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
			if (insns[t].src_reg == BPF_PSEUDO_CALL) {
				env->explored_states[t] = STATE_LIST_MARK;
				ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
				if (ret == 1)
					goto peek_stack;
				else if (ret < 0)
					goto err_free;
			}
		} else if (opcode == BPF_JA) {
			if (BPF_SRC(insns[t].code) != BPF_K) {
				ret = -EINVAL;
				goto err_free;
			}
			/* unconditional jump with single edge */
			ret = push_insn(t, t + insns[t].off + 1,
					FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
			/* tell verifier to check for equivalent states
			 * after every call and jump
			 */
			if (t + 1 < insn_cnt)
				env->explored_states[t + 1] = STATE_LIST_MARK;
		} else {
			/* conditional jump with two edges */
			env->explored_states[t] = STATE_LIST_MARK;
			ret = push_insn(t, t + 1, FALLTHROUGH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;

			ret = push_insn(t, t + insns[t].off + 1, BRANCH, env);
			if (ret == 1)
				goto peek_stack;
			else if (ret < 0)
				goto err_free;
		}
	} else {
		/* all other non-branch instructions with single
		 * fall-through edge
		 */
		ret = push_insn(t, t + 1, FALLTHROUGH, env);
		if (ret == 1)
			goto peek_stack;
		else if (ret < 0)
			goto err_free;
	}

mark_explored:
	insn_state[t] = EXPLORED;
	if (cur_stack-- <= 0) {
		verbose(env, "pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}
	goto peek_stack;

check_state:
	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose(env, "unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kfree(insn_state);
	kfree(insn_stack);
	return ret;
}
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
			 struct bpf_reg_state *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value;
}
/* Maximum number of register states that can exist at once */
#define ID_MAP_SIZE	(MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)

struct idpair {
	u32 old;
	u32 cur;
};

/* If in the old state two registers had the same id, then they need to have
 * the same id in the new state as well.  But that id could be different from
 * the old state, so we need to track the mapping from old to new ids.
 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
 * regs with old id 5 must also have new id 9 for the new state to be safe.  But
 * regs with a different old id could still have new id 9, we don't care about
 * that.
 * So we look through our idmap to see if this old id has been seen before.  If
 * so, we require the new id to match; otherwise, we add the id pair to the map.
 */
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
{
	unsigned int i;

	for (i = 0; i < ID_MAP_SIZE; i++) {
		if (!idmap[i].old) {
			/* Reached an empty slot; haven't seen this id before */
			idmap[i].old = old_id;
			idmap[i].cur = cur_id;
			return true;
		}
		if (idmap[i].old == old_id)
			return idmap[i].cur == cur_id;
	}
	/* We ran out of idmap slots, which should be impossible */
	WARN_ON_ONCE(1);
	return false;
}
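/* Example (illustrative sketch): suppose the old state had two registers
 * sharing id 5 (a map value pointer and its _OR_NULL copy) and the new state
 * has them sharing id 9.  The first check_ids(5, 9, idmap) records the pair
 * and succeeds; a later check_ids(5, 7, idmap) fails because old id 5 is
 * already bound to new id 9:
 *
 *	struct idpair map[2] = {};
 *
 *	check_ids(5, 9, map);	// true, records 5 -> 9
 *	check_ids(5, 9, map);	// true, consistent with the recorded pair
 *	check_ids(5, 7, map);	// false, 5 is already mapped to 9
 *
 * This is what lets a NULL check on one copy of a pointer be trusted for the
 * other copies when comparing verifier states.
 */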
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
		    struct idpair *idmap)
{
	bool equal;

	if (!(rold->live & REG_LIVE_READ))
		/* explored state didn't use this */
		return true;

	equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;

	if (rold->type == PTR_TO_STACK)
		/* two stack pointers are equal only if they're pointing to
		 * the same stack frame, since fp-8 in foo != fp-8 in bar
		 */
		return equal && rold->frameno == rcur->frameno;

	if (equal)
		return true;

	if (rold->type == NOT_INIT)
		/* explored state can't have used this */
		return true;
	if (rcur->type == NOT_INIT)
		return false;
	switch (rold->type) {
	case SCALAR_VALUE:
		if (rcur->type == SCALAR_VALUE) {
			/* new val must satisfy old val knowledge */
			return range_within(rold, rcur) &&
			       tnum_in(rold->var_off, rcur->var_off);
		} else {
			/* We're trying to use a pointer in place of a scalar.
			 * Even if the scalar was unbounded, this could lead to
			 * pointer leaks because scalars are allowed to leak
			 * while pointers are not. We could make this safe in
			 * special cases if root is calling us, but it's
			 * probably not worth the hassle.
			 */
			return false;
		}
	case PTR_TO_MAP_VALUE:
		/* If the new min/max/var_off satisfy the old ones and
		 * everything else matches, we are OK.
		 * We don't care about the 'id' value, because nothing
		 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
		 */
		return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
		       range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_MAP_VALUE_OR_NULL:
		/* a PTR_TO_MAP_VALUE could be safe to use as a
		 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
		 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
		 * checked, doing so could have affected others with the same
		 * id, and we can't check for that because we lost the id when
		 * we converted to a PTR_TO_MAP_VALUE.
		 */
		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
			return false;
		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
			return false;
		/* Check our ids match any regs they're supposed to */
		return check_ids(rold->id, rcur->id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		if (rcur->type != rold->type)
			return false;
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe.  This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range > rcur->range)
			return false;
		/* If the offsets don't match, we can't trust our alignment;
		 * nor can we be sure that we won't fall out of range.
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_CTX:
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
	case PTR_TO_FLOW_KEYS:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	default:
		/* Don't know what's going on, just say it's not safe */
		return false;
	}

	/* Shouldn't get here; if we do, say it's not safe */
	return false;
}
static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct idpair *idmap)
{
	int i, spi;

	/* if explored stack has more populated slots than current stack
	 * such stacks are not equivalent
	 */
	if (old->allocated_stack > cur->allocated_stack)
		return false;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
			/* explored state didn't use this */
			continue;

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;
		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * this verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointers types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
			 * return false to continue verification of this path
			 */
			return false;
	}
	return true;
}
static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
{
	if (old->acquired_refs != cur->acquired_refs)
		return false;
	return !memcmp(old->refs, cur->refs,
		       sizeof(*old->refs) * old->acquired_refs);
}
/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when verifier exploring different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state then this execution
 * branch doesn't need to be explored further, since verifier already
 * concluded that more strict state leads to valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV  slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV  slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_func_state *old,
			      struct bpf_func_state *cur)
{
	struct idpair *idmap;
	bool ret = false;
	int i;

	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
	/* If we failed to allocate the idmap, just say it's not safe */
	if (!idmap)
		return false;

	for (i = 0; i < MAX_BPF_REG; i++) {
		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
			goto out_free;
	}

	if (!stacksafe(old, cur, idmap))
		goto out_free;

	if (!refsafe(old, cur))
		goto out_free;
	ret = true;
out_free:
	kfree(idmap);
	return ret;
}
static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}
/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{
	int i, frame, err = 0;
	struct bpf_func_state *state, *parent;

	if (vparent->curframe != vstate->curframe) {
		WARN(1, "propagate_live: parent frame %d current frame %d\n",
		     vparent->curframe, vstate->curframe);
		return -EFAULT;
	}
	/* Propagate read liveness of registers... */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	/* We don't need to worry about FP liveness because it's read-only */
	for (i = 0; i < BPF_REG_FP; i++) {
		if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
			continue;
		if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
			err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
					    &vparent->frame[vstate->curframe]->regs[i]);
			if (err)
				return err;
		}
	}

	/* ... and stack slots */
	for (frame = 0; frame <= vstate->curframe; frame++) {
		state = vstate->frame[frame];
		parent = vparent->frame[frame];
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
			if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ)
				continue;
			if (state->stack[i].spilled_ptr.live & REG_LIVE_READ)
				mark_reg_read(env, &state->stack[i].spilled_ptr,
					      &parent->stack[i].spilled_ptr);
		}
	}
	return err;
}
static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl;
	struct bpf_verifier_state *cur = env->cur_state, *new;
	int i, j, err, states_cnt = 0;

	sl = env->explored_states[insn_idx];
	if (!sl)
		/* this 'insn_idx' instruction wasn't marked, so we will not
		 * be doing state search here
		 */
		return 0;

	while (sl != STATE_LIST_MARK) {
		if (states_equal(env, &sl->state, cur)) {
			/* reached equivalent register/stack state,
			 * prune the search.
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state).  Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);
			if (err)
				return err;
			return 1;
		}
		sl = sl->next;
		states_cnt++;
	}

	if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return 0;

	/* there were no equivalent states, remember current one.
	 * technically the current state is not proven to be safe yet,
	 * but it will either reach outer most bpf_exit (which means it's safe)
	 * or it will be rejected. Since there are no loops, we won't be
	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
	 * again on the way to bpf_exit
	 */
	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
	if (!new_sl)
		return -ENOMEM;

	/* add new state to the head of linked list */
	new = &new_sl->state;
	err = copy_verifier_state(new, cur);
	if (err) {
		free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}
	new_sl->next = env->explored_states[insn_idx];
	env->explored_states[insn_idx] = new_sl;
	/* connect new state to parentage chain */
	for (i = 0; i < BPF_REG_FP; i++)
		cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
	 * their parent and current state never has children yet.  Only
	 * explored_states can get read marks.)
	 */
	for (i = 0; i < BPF_REG_FP; i++)
		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;

	/* all stack frames are accessible from callee, clear them all */
	for (j = 0; j <= cur->curframe; j++) {
		struct bpf_func_state *frame = cur->frame[j];
		struct bpf_func_state *newframe = new->frame[j];

		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
			frame->stack[i].spilled_ptr.parent =
						&newframe->stack[i].spilled_ptr;
		}
	}
	return 0;
}
/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
	switch (type) {
	case PTR_TO_CTX:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCKET_OR_NULL:
		return false;
	default:
		return true;
	}
}

/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the below, where it may be ok
 * for one branch accessing the pointer, but not ok for the other branch:
 *
 *          R1 = sock_ptr
 *          goto X;
 *          ...
 *          R1 = some_other_valid_ptr;
 *          goto X;
 *          ...
 *          R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
	return src != prev && (!reg_type_mismatch_ok(src) ||
			       !reg_type_mismatch_ok(prev));
}
static int do_check(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len, i;
	int insn_idx, prev_insn_idx = 0;
	int insn_processed = 0;
	bool do_print_state = false;

	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	state->curframe = 0;
	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
	if (!state->frame[0]) {
		kfree(state);
		return -ENOMEM;
	}
	env->cur_state = state;
	init_func_state(env, state->frame[0],
			BPF_MAIN_FUNC /* callsite */,
			0 /* frameno */,
			0 /* subprogno, zero == main subprog */);

	insn_idx = 0;
	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		if (insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[insn_idx];
		class = BPF_CLASS(insn->code);

		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose(env,
				"BPF program is too large. Processed %d insn\n",
				insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (env->log.level) {
				if (do_print_state)
					verbose(env, "\nfrom %d to %d: safe\n",
						prev_insn_idx, insn_idx);
				else
					verbose(env, "%d: safe\n", insn_idx);
			}
			goto process_bpf_exit;
		}

		if (signal_pending(current))
			return -EAGAIN;

		if (need_resched())
			cond_resched();

		if (env->log.level > 1 || (env->log.level && do_print_state)) {
			if (env->log.level > 1)
				verbose(env, "%d:", insn_idx);
			else
				verbose(env, "\nfrom %d to %d:",
					prev_insn_idx, insn_idx);
			print_verifier_state(env, state->frame[state->curframe]);
			do_print_state = false;
		}

		if (env->log.level) {
			const struct bpf_insn_cbs cbs = {
				.cb_print	= verbose,
				.private_data	= env,
			};

			verbose(env, "%d: ", insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
		}

		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, insn_idx,
							   prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		env->insn_aux_data[insn_idx].seen = true;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type *prev_src_type, src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_READ,
					       insn->dst_reg, false);
			if (err)
				return err;

			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;

			if (*prev_src_type == NOT_INIT) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * save type to validate intersecting paths
				 */
				*prev_src_type = src_reg_type;

			} else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
				 */
				verbose(env, "same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_STX) {
			enum bpf_reg_type *prev_dst_type, dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn_idx, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg, false);
			if (err)
				return err;

			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;

			if (*prev_dst_type == NOT_INIT) {
				*prev_dst_type = dst_reg_type;
			} else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
				verbose(env, "same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose(env, "BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			if (is_ctx_reg(env, insn->dst_reg)) {
				verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
					insn->dst_reg,
					reg_type_str[reg_state(env, insn->dst_reg)->type]);
				return -EACCES;
			}

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1, false);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    (insn->src_reg != BPF_REG_0 &&
				     insn->src_reg != BPF_PSEUDO_CALL) ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				if (insn->src_reg == BPF_PSEUDO_CALL)
					err = check_func_call(env, insn, &insn_idx);
				else
					err = check_helper_call(env, insn->imm, insn_idx);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				if (state->curframe) {
					/* exit from nested function */
					prev_insn_idx = insn_idx;
					err = prepare_func_exit(env, &insn_idx);
					if (err)
						return err;
					do_print_state = true;
					continue;
				}

				err = check_reference_leak(env);
				if (err)
					return err;

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
				 * Make sure that it's readable at this time
				 * of bpf_exit, which means that program wrote
				 * something into it earlier
				 */
				err = check_reg_arg(env, BPF_REG_0, SRC_OP);
				if (err)
					return err;

				if (is_pointer_value(env, BPF_REG_0)) {
					verbose(env, "R0 leaks addr as return value\n");
					return -EACCES;
				}

				err = check_return_code(env);
				if (err)
					return err;
process_bpf_exit:
				err = pop_stack(env, &prev_insn_idx, &insn_idx);
				if (err < 0) {
					if (err != -ENOENT)
						return err;
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				insn_idx++;
				env->insn_aux_data[insn_idx].seen = true;
			} else {
				verbose(env, "invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose(env, "unknown insn class %d\n", class);
			return -EINVAL;
		}

		insn_idx++;
	}

	verbose(env, "processed %d insns (limit %d), stack depth ",
		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
	for (i = 0; i < env->subprog_cnt; i++) {
		u32 depth = env->subprog_info[i].stack_depth;

		verbose(env, "%d", depth);
		if (i + 1 < env->subprog_cnt)
			verbose(env, "+");
	}
	verbose(env, "\n");
	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
	return 0;
}
static int check_map_prealloc(struct bpf_map *map)
{
	return (map->map_type != BPF_MAP_TYPE_HASH &&
		map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
		map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
		!(map->map_flags & BPF_F_NO_PREALLOC);
}
static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map,
					struct bpf_prog *prog)

{
	/* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
	 * preallocated hash maps, since doing memory allocation
	 * in overflow_handler can crash depending on where nmi got
	 * triggered.
	 */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
		if (!check_map_prealloc(map)) {
			verbose(env, "perf_event programs can only use preallocated hash map\n");
			return -EINVAL;
		}
		if (map->inner_map_meta &&
		    !check_map_prealloc(map->inner_map_meta)) {
			verbose(env, "perf_event programs can only use preallocated inner hash map\n");
			return -EINVAL;
		}
	}

	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
	    !bpf_offload_prog_map_match(prog, map)) {
		verbose(env, "offload device mismatch between prog and map\n");
		return -EINVAL;
	}

	return 0;
}

static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
/* look for pseudo eBPF instructions that access map FDs and
 * replace them with actual map pointers
 */
static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j, err;

	err = bpf_prog_calc_tag(env->prog);
	if (err)
		return err;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
			verbose(env, "BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (BPF_CLASS(insn->code) == BPF_STX &&
		    ((BPF_MODE(insn->code) != BPF_MEM &&
		      BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
			verbose(env, "BPF_STX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_map *map;
			struct fd f;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose(env, "invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn->src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
				verbose(env,
					"unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			f = fdget(insn->imm);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose(env, "fd %d is not pointing to valid bpf_map\n",
					insn->imm);
				return PTR_ERR(map);
			}

			err = check_map_prog_compatibility(env, map, env->prog);
			if (err) {
				fdput(f);
				return err;
			}

			/* store map pointer inside BPF_LD_IMM64 instruction */
			insn[0].imm = (u32) (unsigned long) map;
			insn[1].imm = ((u64) (unsigned long) map) >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++)
				if (env->used_maps[j] == map) {
					fdput(f);
					goto next_insn;
				}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_used_maps()
			 */
			map = bpf_map_inc(map, false);
			if (IS_ERR(map)) {
				fdput(f);
				return PTR_ERR(map);
			}
			env->used_maps[env->used_map_cnt++] = map;

			if (bpf_map_is_cgroup_storage(map) &&
			    bpf_cgroup_storage_assign(env->prog, map)) {
				verbose(env, "only one cgroup storage of each type is allowed\n");
				fdput(f);
				return -EBUSY;
			}

			fdput(f);
next_insn:
			insn++;
			i++;
			continue;
		}

		/* Basic sanity check before we invest more work here. */
		if (!bpf_opcode_in_insntable(insn->code)) {
			verbose(env, "unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
	enum bpf_cgroup_storage_type stype;
	int i;

	for_each_cgroup_storage_type(stype) {
		if (!env->prog->aux->cgroup_storage[stype])
			continue;
		bpf_cgroup_storage_release(env->prog,
			env->prog->aux->cgroup_storage[stype]);
	}

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}
/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}
/* single env->prog->insni[off] instruction was replaced with the range
 * insni[off, off + cnt).  Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
				u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
	int i;

	if (cnt == 1)
		return 0;
	new_data = vzalloc(array_size(prog_len,
				      sizeof(struct bpf_insn_aux_data)));
	if (!new_data)
		return -ENOMEM;
	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
	memcpy(new_data + off + cnt - 1, old_data + off,
	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	for (i = off; i < off + cnt - 1; i++)
		new_data[i].seen = true;
	env->insn_aux_data = new_data;
	vfree(old_data);
	return 0;
}
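/* Illustration (sketch) of the splice performed above when the instruction at
 * 'off' is patched into 'cnt' instructions (prog_len is the *new* length):
 * old aux slots [0, off) keep their index, and old slots [off, old_len) shift
 * up by cnt - 1, so the patched instruction's own aux entry lands on the last
 * instruction of the patch while the freshly inserted slots stay zeroed and
 * are only marked 'seen':
 *
 *	old aux:  A B C D            (patch at off=1, cnt=3)
 *	new aux:  A . . B C D        ('.' = zeroed, seen = true)
 *
 * The two memcpy() calls implement exactly this split copy.
 */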
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	if (len == 1)
		return;
	/* NOTE: fake 'exit' subprog should be updated as well. */
	for (i = 0; i <= env->subprog_cnt; i++) {
		if (env->subprog_info[i].start <= off)
			continue;
		env->subprog_info[i].start += len - 1;
	}
}

static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (!new_prog)
		return NULL;
	if (adjust_insn_aux_data(env, new_prog->len, off, len))
		return NULL;
	adjust_subprog_starts(env, off, len);
	return new_prog;
}
/* The verifier does more data flow analysis than llvm and will not
 * explore branches that are dead at run time. Malicious programs can
 * have dead code too. Therefore replace all dead at-run-time code
 * with 'ja -1'.
 *
 * Just nops are not optimal, e.g. if they would sit at the end of the
 * program and through another bug we would manage to jump there, then
 * we'd execute beyond program memory otherwise. Returning exception
 * code also wouldn't work since we can have subprogs where the dead
 * code could be located.
 */
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++) {
		if (aux_data[i].seen)
			continue;
		memcpy(insn + i, &trap, sizeof(trap));
	}
}
/* convert load instructions that access fields of a context type into a
 * sequence of instructions that access fields of the underlying structure:
 *     struct __sk_buff    -> struct sk_buff
 *     struct bpf_sock_ops -> struct sock
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
	const struct bpf_verifier_ops *ops = env->ops;
	int i, cnt, size, ctx_field_size, delta = 0;
	const int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16], *insn;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	bool is_narrower_load;
	u32 target_size;

	if (ops->gen_prologue || env->seen_direct_write) {
		if (!ops->gen_prologue) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
					env->prog);
		if (cnt >= ARRAY_SIZE(insn_buf)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		} else if (cnt) {
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			env->prog = new_prog;
			delta += cnt - 1;
		}
	}

	if (bpf_prog_is_dev_bound(env->prog->aux))
		return 0;

	insn = env->prog->insnsi + delta;

	for (i = 0; i < insn_cnt; i++, insn++) {
		bpf_convert_ctx_access_t convert_ctx_access;

		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
			type = BPF_READ;
		else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
			 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
			type = BPF_WRITE;
		else
			continue;

		if (type == BPF_WRITE &&
		    env->insn_aux_data[i + delta].sanitize_stack_off) {
			struct bpf_insn patch[] = {
				/* Sanitize suspicious stack slot with zero.
				 * There are no memory dependencies for this store,
				 * since it's only using frame pointer and immediate
				 * constant of zero
				 */
				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
					   env->insn_aux_data[i + delta].sanitize_stack_off,
					   0),
				/* the original STX instruction will immediately
				 * overwrite the same stack slot with appropriate value
				 */
				*insn,
			};

			cnt = ARRAY_SIZE(patch);
			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		switch (env->insn_aux_data[i + delta].ptr_type) {
		case PTR_TO_CTX:
			if (!ops->convert_ctx_access)
				continue;
			convert_ctx_access = ops->convert_ctx_access;
			break;
		case PTR_TO_SOCKET:
			convert_ctx_access = bpf_sock_convert_ctx_access;
			break;
		default:
			continue;
		}

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimum program type specific
		 * convert_ctx_access changes. If conversion is successful,
		 * we will apply proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		if (is_narrower_load) {
			u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
			u32 off = insn->off;
			u8 size_code;

			if (type == BPF_WRITE) {
				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
				return -EINVAL;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(size_default - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
					 &target_size);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
		    (ctx_field_size && !target_size)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (is_narrower_load && size < target_size) {
			if (ctx_field_size <= 4)
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			else
				insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
		}

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn      = new_prog->insnsi + i + delta;
	}

	return 0;
}
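/* Example (sketch) of the narrow-load handling above: a program reads one
 * byte of a 4-byte context field, e.g. a hypothetical
 *
 *	r0 = *(u8 *)(r1 + offsetof(struct __sk_buff, mark))
 *
 * The load is first widened to the full field size so that the program-type
 * specific convert_ctx_access() only needs to handle aligned full-width
 * accesses, and the requested width is then restored by masking:
 *
 *	r0 = *(u32 *)(r1 + aligned_off)
 *	r0 &= 0xff				// (1 << size * 8) - 1
 *
 * which is what the BPF_ALU32_IMM/BPF_ALU64_IMM(BPF_AND, ...) instructions
 * appended to insn_buf implement.
 */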
static int jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err = -ENOMEM;

	if (env->subprog_cnt <= 1)
		return 0;

	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		/* Upon error here we cannot fall back to interpreter but
		 * need a hard reject of the program. Thus -EFAULT is
		 * propagated in any case.
		 */
		subprog = find_subprog(env, i + insn->imm + 1);
		if (subprog < 0) {
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  i + insn->imm + 1);
			return -EFAULT;
		}
		/* temporarily remember subprog id inside insn instead of
		 * aux_data, since next loop will split up all insns into funcs
		 */
		insn->off = subprog;
		/* remember original imm in case JIT fails and fallback
		 * to interpreter will be needed
		 */
		env->insn_aux_data[i].call_imm = insn->imm;
		/* point imm to __bpf_call_base+1 from JITs point of view */
		insn->imm = 1;
	}

	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
	if (!func)
		goto out_undo_insn;

	for (i = 0; i < env->subprog_cnt; i++) {
		subprog_start = subprog_end;
		subprog_end = env->subprog_info[i + 1].start;

		len = subprog_end - subprog_start;
		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
		if (!func[i])
			goto out_free;
		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
		       len * sizeof(struct bpf_insn));
		func[i]->type = prog->type;
		func[i]->len = len;
		if (bpf_prog_calc_tag(func[i]))
			goto out_free;
		func[i]->is_func = 1;
		/* Use bpf_prog_F_tag to indicate functions in stack traces.
		 * Long term would need debug info to populate names
		 */
		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		func[i]->jit_requested = 1;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
	}
	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (insn->code != (BPF_JMP | BPF_CALL) ||
			    insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			subprog = insn->off;
			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
				func[subprog]->bpf_func -
				__bpf_call_base;
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -EFAULT;
			goto out_free;
		}
	}

	/* finally lock prog and jit images for all functions and
	 * populate kallsyms
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		bpf_prog_lock_ro(func[i]);
		bpf_prog_kallsyms_add(func[i]);
	}

	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
	 */
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		insn->off = env->insn_aux_data[i].call_imm;
		subprog = find_subprog(env, i + insn->off + 1);
		insn->imm = subprog;
	}

	prog->jited = 1;
	prog->bpf_func = func[0]->bpf_func;
	prog->aux->func = func;
	prog->aux->func_cnt = env->subprog_cnt;
	return 0;
out_free:
	for (i = 0; i < env->subprog_cnt; i++)
		if (func[i])
			bpf_jit_free(func[i]);
	kfree(func);
out_undo_insn:
	/* cleanup main prog to be interpreted */
	prog->jit_requested = 0;
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		insn->off = 0;
		insn->imm = env->insn_aux_data[i].call_imm;
	}
	return err;
}
static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	int i, depth;
#endif
	int err = 0;

	if (env->prog->jit_requested &&
	    !bpf_prog_is_dev_bound(env->prog->aux)) {
		err = jit_subprogs(env);
		if (err == 0)
			return 0;
		if (err == -EFAULT)
			return err;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	for (i = 0; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)
			continue;
		depth = get_callee_stack_depth(env, insn, i);
		if (depth < 0)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	err = 0;
#endif
	return err;
}
/* fixup insn->imm field of bpf_call instructions
 * and inline eligible helpers as explicit sequence of BPF instructions
 *
 * this function is called after eBPF program passed verification
 */
static int fixup_bpf_calls(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	const struct bpf_map_ops *ops;
	struct bpf_insn_aux_data *aux;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			struct bpf_insn mask_and_div[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx div 0 -> 0 */
				BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				*insn,
			};
			struct bpf_insn mask_and_mod[] = {
				BPF_MOV32_REG(insn->src_reg, insn->src_reg),
				/* Rx mod 0 -> Rx */
				BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
				*insn,
			};
			struct bpf_insn *patchlet;

			if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
			    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
				patchlet = mask_and_div + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
			} else {
				patchlet = mask_and_mod + (is64 ? 1 : 0);
				cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}
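		/* For illustration: with a 32-bit divide such as
		 * BPF_ALU | BPF_DIV | BPF_X (say dst_reg=2, src_reg=3),
		 * the patchlet above expands the single insn into:
		 *
		 *	w3 = w3			// truncate divisor to 32 bits
		 *	if r3 != 0 goto +2	// non-zero divisor: divide
		 *	w2 ^= w2		// division by zero yields 0
		 *	goto +1			// skip the original insn
		 *	w2 /= w3		// original instruction
		 *
		 * For the 64-bit variants the leading truncation insn is
		 * dropped; the mod patchlet instead just skips the original
		 * insn when the divisor is zero, leaving dst unchanged.
		 */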
		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}
		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}
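		/* At the BPF level the tail_call patch above reads roughly:
		 *
		 *	if r3 >= map->max_entries goto +2	// out of bounds: skip
		 *	w3 &= array->index_mask			// clamp the index
		 *	tail_call				// original insn (BPF_TAIL_CALL)
		 *
		 * so a mispredicted bounds check can no longer speculatively
		 * load an out-of-bounds prog_array slot.
		 */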
		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
		 */
		if (prog->jit_requested && BITS_PER_LONG == 64 &&
		    (insn->imm == BPF_FUNC_map_lookup_elem ||
		     insn->imm == BPF_FUNC_map_update_elem ||
		     insn->imm == BPF_FUNC_map_delete_elem ||
		     insn->imm == BPF_FUNC_map_push_elem ||
		     insn->imm == BPF_FUNC_map_pop_elem ||
		     insn->imm == BPF_FUNC_map_peek_elem)) {
			aux = &env->insn_aux_data[i + delta];
			if (bpf_map_ptr_poisoned(aux))
				goto patch_call_imm;

			map_ptr = BPF_MAP_PTR(aux->map_state);
			ops = map_ptr->ops;
			if (insn->imm == BPF_FUNC_map_lookup_elem &&
			    ops->map_gen_lookup) {
				cnt = ops->map_gen_lookup(map_ptr, insn_buf);
				if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
					verbose(env, "bpf verifier is misconfigured\n");
					return -EINVAL;
				}

				new_prog = bpf_patch_insn_data(env, i + delta,
							       insn_buf, cnt);
				if (!new_prog)
					return -ENOMEM;

				delta    += cnt - 1;
				env->prog = prog = new_prog;
				insn      = new_prog->insnsi + i + delta;
				continue;
			}
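			/* Example of what ->map_gen_lookup may emit (sketch,
			 * based on the array map): load the u32 index from the
			 * key, bounds-check it against max_entries (plus an
			 * index mask for unprivileged programs), compute
			 * value = array->value + index * round_up(value_size, 8)
			 * and set r0 = NULL when out of range - a few inline
			 * insns instead of an indirect helper call.
			 */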
			BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
				     (void *(*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
				     (int (*)(struct bpf_map *map, void *key))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_update_elem,
				     (int (*)(struct bpf_map *map, void *key, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_push_elem,
				     (int (*)(struct bpf_map *map, void *value,
					      u64 flags))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));
			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
				     (int (*)(struct bpf_map *map, void *value))NULL));
			switch (insn->imm) {
			case BPF_FUNC_map_lookup_elem:
				insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_update_elem:
				insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_delete_elem:
				insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_push_elem:
				insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_pop_elem:
				insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
					    __bpf_call_base;
				continue;
			case BPF_FUNC_map_peek_elem:
				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
					    __bpf_call_base;
				continue;
			}

			goto patch_call_imm;
		}
patch_call_imm:
		fn = env->ops->get_func_proto(insn->imm, env->prog);
		/* all functions that have prototype and verifier allowed
		 * programs to call them, must be real in-kernel functions
		 */
		if (!fn->func) {
			verbose(env,
				"kernel subsystem misconfigured func %s#%d\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
		insn->imm = fn->func - __bpf_call_base;
	}

	return 0;
}
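/* Note on the encoding used by fixup_bpf_calls() above: a helper call is
 * stored relative to __bpf_call_base, i.e. insn->imm = fn->func -
 * __bpf_call_base, so at run time the helper address is recovered as
 * __bpf_call_base + insn->imm; jit_subprogs() uses the same convention for
 * bpf-to-bpf callee images when the delta fits in the imm field.
 */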
static void free_states(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state_list *sl, *sln;
	int i;

	if (!env->explored_states)
		return;

	for (i = 0; i < env->prog->len; i++) {
		sl = env->explored_states[i];

		if (sl)
			while (sl != STATE_LIST_MARK) {
				sln = sl->next;
				free_verifier_state(&sl->state, false);
				kfree(sl);
				sl = sln;
			}
	}

	kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
{
	struct bpf_verifier_env *env;
	struct bpf_verifier_log *log;
	int ret = -EINVAL;

	/* no program is valid */
	if (ARRAY_SIZE(bpf_verifier_ops) == 0)
		return -EINVAL;

	/* 'struct bpf_verifier_env' can be global, but since it's not small,
	 * allocate/free it every time bpf_check() is called
	 */
	env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
	if (!env)
		return -ENOMEM;
	log = &env->log;

	env->insn_aux_data =
	     vzalloc(array_size(sizeof(struct bpf_insn_aux_data),
				(*prog)->len));
	ret = -ENOMEM;
	if (!env->insn_aux_data)
		goto err_free_env;
	env->prog = *prog;
	env->ops = bpf_verifier_ops[env->prog->type];

	/* grab the mutex to protect few globals used by verifier */
	mutex_lock(&bpf_verifier_lock);

	if (attr->log_level || attr->log_buf || attr->log_size) {
		/* user requested verbose verifier output
		 * and supplied buffer to store the verification trace
		 */
		log->level = attr->log_level;
		log->ubuf = (char __user *) (unsigned long) attr->log_buf;
		log->len_total = attr->log_size;

		ret = -EINVAL;
		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf)
			goto err_unlock;
	}
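	/* In other words, a loader that wants the trace must pass a non-zero
	 * log_level, a writable log_buf, and a log_size of at least 128 bytes
	 * and below 2^24; the userspace side looks roughly like this
	 * (illustration only):
	 *
	 *	char buf[65536];
	 *	attr.log_level = 1;
	 *	attr.log_buf   = (__u64)(unsigned long)buf;
	 *	attr.log_size  = sizeof(buf);
	 *	fd = bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
	 */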
	env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
		env->strict_alignment = true;

	ret = replace_map_fd_with_map_ptr(env);
	if (ret < 0)
		goto skip_full_check;

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		ret = bpf_prog_offload_verifier_prep(env);
		if (ret)
			goto skip_full_check;
	}

	env->explored_states = kcalloc(env->prog->len,
				       sizeof(struct bpf_verifier_state_list *),
				       GFP_USER);
	ret = -ENOMEM;
	if (!env->explored_states)
		goto skip_full_check;

	env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);

	ret = check_cfg(env);
	if (ret < 0)
		goto skip_full_check;

	ret = do_check(env);
	if (env->cur_state) {
		free_verifier_state(env->cur_state, true);
		env->cur_state = NULL;
	}

	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
		ret = bpf_prog_offload_finalize(env);

skip_full_check:
	while (!pop_stack(env, NULL, NULL));
	free_states(env);

	if (ret == 0)
		sanitize_dead_code(env);

	if (ret == 0)
		ret = check_max_stack_depth(env);

	if (ret == 0)
		/* program is valid, convert *(u32*)(ctx + off) accesses */
		ret = convert_ctx_accesses(env);

	if (ret == 0)
		ret = fixup_bpf_calls(env);

	if (ret == 0)
		ret = fixup_call_args(env);
	if (log->level && bpf_verifier_log_full(log))
		ret = -ENOSPC;
	if (log->level && !log->ubuf) {
		ret = -EFAULT;
		goto err_release_maps;
	}

	if (ret == 0 && env->used_map_cnt) {
		/* if program passed verifier, update used_maps in bpf_prog_info */
		env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
							  sizeof(env->used_maps[0]),
							  GFP_KERNEL);

		if (!env->prog->aux->used_maps) {
			ret = -ENOMEM;
			goto err_release_maps;
		}

		memcpy(env->prog->aux->used_maps, env->used_maps,
		       sizeof(env->used_maps[0]) * env->used_map_cnt);
		env->prog->aux->used_map_cnt = env->used_map_cnt;

		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
		 * bpf_ld_imm64 instructions
		 */
		convert_pseudo_ld_imm64(env);
	}
err_release_maps:
	if (!env->prog->aux->used_maps)
		/* if we didn't copy map pointers into bpf_prog_info, release
		 * them now. Otherwise free_used_maps() will release them.
		 */
		release_maps(env);
	*prog = env->prog;

err_unlock:
	mutex_unlock(&bpf_verifier_lock);
	vfree(env->insn_aux_data);