/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* size of tmp_str_buf in bpf_verifier.
 * we need at least 306 bytes to fit full stack mask representation
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320
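
/* Editorial check of the 306-byte figure above: the stack mask lists every
 * 8-byte slot from -8 to -512, i.e. 64 entries; one offset has 1 digit
 * ("-8"), eleven have 2 digits ("-16".."-96") and fifty-two have 3 digits
 * ("-104".."-512"), so 64 '-' signs + 179 digits + 63 commas = 306 bytes.
 */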

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
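
/* Illustrative sketch (editorial, not from the original source):
 *
 *   r2 = r3;    // straight-line code writes r2 -> REG_LIVE_WRITTEN on r2
 *   r0 = r2;    // later read of r2 is satisfied by the local write
 *
 * The read mark from the second insn stops at the write mark on the link to
 * the parent state, so the parent never receives REG_LIVE_READ64 for r2 and
 * states_equal() against the parent need not compare r2's contents.
 */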

/* For every reg representing a map value or allocated object pointer,
 * we consider the tuple of (ptr, id) for them to be unique in verifier
 * context and consider them to not alias each other for the purposes of
 * tracking lock state.
 */
struct bpf_active_lock {
	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
	 * there's no active lock held, and other fields have no
	 * meaning. If non-NULL, it indicates that a lock is held and
	 * id member has the reg->id of the register which can be >= 0.
	 */
	void *ptr;
	/* This will be reg->id */
	u32 id;
};

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
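
	/* Editorial example: after a 32-bit load such as r0 = *(u32 *)(r1 + 0),
	 * the verifier can set umin_value = 0, umax_value = 0xffffffff and
	 * var_off = (0x0; 0xffffffff), i.e. upper 32 bits known zero; a later
	 * "if r0 < 16" then tightens umax_value/u32_max_value to 15 on the
	 * taken path.
	 */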
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
	 * "tp" ptr should be invalidated also. In order to do that,
	 * the reg holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id will stay as NULL-marking purpose only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)
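
/* Editorial note: per the dynptr comment above, struct bpf_dynptr_kern is
 * 16 bytes, so BPF_DYNPTR_NR_SLOTS evaluates to 16 / 8 = 2 stack slots.
 */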

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* There can be a case like:
	 * main (frame 0)
	 *  cb (frame 1)
	 *   func (frame 3)
	 *    cb (frame 4)
	 * Hence for frame 4, if callback_ref just stored boolean, it would be
	 * impossible to distinguish nested callback refs. Hence store the
	 * frameno and compare that to callback_ref in check_reference_leak when
	 * exiting a callback function.
	 */
	int callback_ref;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	bool in_callback_fn;
	struct tnum callback_ret_range;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

#define MAX_CALL_FRAMES 8

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
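
/* Editorial note: with MAX_BPF_REG == 11, MAX_BPF_STACK == 512 and
 * BPF_REG_SIZE == 8, BPF_ID_MAP_SIZE is (11 + 64) * 8 = 600 entries.
 */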

struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 * One is fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops or
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	struct bpf_active_lock active_lock;
	bool speculative;
	bool active_rcu_lock;
	/* If this state was ever pointed-to by other state's loop_entry field
	 * this flag would be set to true. Used to avoid freeing such states
	 * while they are still in use.
	 */
	bool used_as_loop_entry;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a part of states loop this field points to some
	 * parent of this state such that:
	 * - it is also a member of the same states loop;
	 * - DFS states traversal starting from initial state visits loop_entry
	 *   state before this state.
	 * Used to compute topmost loop entry for state loops.
	 * State loops might appear because of open coded iterators logic.
	 * See get_loop_entry() for more information.
	 */
	struct bpf_verifier_state *loop_entry;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
	u32 dfs_depth;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[0]) & (mask)))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)		\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);	\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr)   \
	({								\
		struct bpf_verifier_state *___vstate = __vst;		\
		int ___i, ___j;						\
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
			struct bpf_reg_state *___regs;			\
			__state = ___vstate->frame[___i];		\
			___regs = __state->regs;			\
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
				__reg = &___regs[___j];			\
				(void)(__expr);				\
			}						\
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)				\
					continue;			\
				(void)(__expr);				\
			}						\
		}							\
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
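
/* Editorial usage sketch, modeled on release_reference() in
 * kernel/bpf/verifier.c: invalidate every register (including spilled ones)
 * that still refers to the object identified by 'ref_obj_id':
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == ref_obj_id)
 *			mark_reg_invalid(env, reg);
 *	}));
 */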

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop this field tracks
		 * the state of the relevant registers to make decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitization */
	bool zext_dst; /* this insn zero extends dst reg */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives number of useful data in
	 * user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U
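
/* Editorial example: a BPF_PROG_LOAD request asking for the basic verifier
 * log plus verification statistics would pass
 * log_level = BPF_LOG_LEVEL1 | BPF_LOG_STATS (i.e. 1 | 4 = 5).
 */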

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_cb;
	bool is_async_cb;
	bool is_exception_cb;
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	struct backtrack_state bt;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
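
/* Editorial usage note: cur_regs(env)[BPF_REG_0] is the verifier's view of
 * R0 in the innermost call frame, e.g. when checking a helper's return
 * value.
 */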

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
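
/* Editorial round-trip sketch: for a kernel-function target (tgt_prog ==
 * NULL) the key packs btf_obj_id(btf) into the high 32 bits and sets bit 31
 * of the low word, so
 *
 *	u64 key = bpf_trampoline_compute_key(NULL, btf, btf_id);
 *	bpf_trampoline_unpack_key(key, &obj_id, &type_id);
 *
 * recovers obj_id == btf_obj_id(btf) and type_id == btf_id (for btf_id
 * values below 2^31, per the 0x7FFFFFFF mask above).
 */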

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
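
/* Editorial example: for an extended type such as
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL, base_type() returns PTR_TO_MAP_VALUE
 * and type_flag() returns PTR_MAYBE_NULL.
 */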

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
	       prog->aux->dst_prog->type : prog->type;
}
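
/* Editorial example: a BPF_PROG_TYPE_EXT program attached via freplace to
 * an XDP program resolves to BPF_PROG_TYPE_XDP, so it is verified against
 * its target's context and return-value rules.
 */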

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}
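
/* Editorial example: PTR_TO_BTF_ID | PTR_TRUSTED carries only trusted
 * modifiers, so this returns false; PTR_TO_BTF_ID | PTR_UNTRUSTED keeps
 * PTR_UNTRUSTED after masking and yields true.
 */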

#endif /* _LINUX_BPF_VERIFIER_H */