1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Facebook */
4 #include <uapi/linux/btf.h>
5 #include <uapi/linux/bpf.h>
6 #include <uapi/linux/bpf_perf_event.h>
7 #include <uapi/linux/types.h>
8 #include <linux/seq_file.h>
9 #include <linux/compiler.h>
10 #include <linux/ctype.h>
11 #include <linux/errno.h>
12 #include <linux/slab.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/file.h>
15 #include <linux/uaccess.h>
16 #include <linux/kernel.h>
17 #include <linux/idr.h>
18 #include <linux/sort.h>
19 #include <linux/bpf_verifier.h>
20 #include <linux/btf.h>
21 #include <linux/btf_ids.h>
22 #include <linux/bpf_lsm.h>
23 #include <linux/skmsg.h>
24 #include <linux/perf_event.h>
25 #include <linux/bsearch.h>
26 #include <linux/kobject.h>
27 #include <linux/sysfs.h>
29 #include <net/netfilter/nf_bpf_link.h>
33 #include "../tools/lib/bpf/relo_core.h"
/* BTF (BPF Type Format) is the metadata format which describes
 * the data types of BPF programs/maps. Hence, it basically focuses
 * on the C programming language, which modern BPF primarily uses.
42 * The BTF data is stored under the ".BTF" ELF section
46 * Each 'struct btf_type' object describes a C data type.
47 * Depending on the type it is describing, a 'struct btf_type'
 * object may be followed by more data. For example,
 * to describe an array, 'struct btf_type' is followed by
 * 'struct btf_array'. The 'struct btf_type' and any extra data
 * following it are collectively known as one btf_type object.
57 * The BTF type section contains a list of 'struct btf_type' objects.
58 * Each one describes a C type. Recall from the above section
59 * that a 'struct btf_type' object could be immediately followed by extra
60 * data in order to describe some particular C types.
64 * Each btf_type object is identified by a type_id. The type_id
65 * is implicitly implied by the location of the btf_type object in
66 * the BTF type section. The first one has type_id 1. The second
67 * one has type_id 2...etc. Hence, an earlier btf_type has
70 * A btf_type object may refer to another btf_type object by using
71 * type_id (i.e. the "type" in the "struct btf_type").
73 * NOTE that we cannot assume any reference-order.
74 * A btf_type object can refer to an earlier btf_type object
75 * but it can also refer to a later btf_type object.
77 * For example, to describe "const void *". A btf_type
78 * object describing "const" may refer to another btf_type
79 * object describing "void *". This type-reference is done
80 * by specifying type_id:
82 * [1] CONST (anon) type_id=2
83 * [2] PTR (anon) type_id=0
85 * The above is the btf_verifier debug log:
86 * - Each line started with "[?]" is a btf_type object
87 * - [?] is the type_id of the btf_type object.
88 * - CONST/PTR is the BTF_KIND_XXX
 * - "(anon)" is the name of the type. It just
 *   happens that CONST and PTR have no name.
91 * - type_id=XXX is the 'u32 type' in btf_type
93 * NOTE: "void" has type_id 0
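 *
 * As a further illustration (not taken from an actual verifier log),
 * the C declaration "const int *p" could be encoded with three
 * btf_type objects along these lines:
 *
 *   [1] INT "int" size=4
 *   [2] CONST (anon) type_id=1
 *   [3] PTR (anon) type_id=2
 *
 * i.e. the pointer refers to the const, which in turn refers to int.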
97 * The BTF string section contains the names used by the type section.
98 * Each string is referred by an "offset" from the beginning of the
101 * Each string is '\0' terminated.
103 * The first character in the string section must be '\0'
 * which is used to mean 'anonymous'. Some btf_type may not have a name.
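 *
 * For example, a string section holding just the names "int" and "pkt"
 * could look like "\0int\0pkt\0" (illustrative), where "int" is at
 * offset 1, "pkt" is at offset 5, and offset 0 is the empty string
 * used for anonymous types.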
110 * To verify BTF data, two passes are needed.
114 * The first pass is to collect all btf_type objects to
115 * an array: "btf->types".
117 * Depending on the C type that a btf_type is describing,
118 * a btf_type may be followed by extra data. We don't know
 * how many btf_types there are, and more importantly we don't
120 * know where each btf_type is located in the type section.
122 * Without knowing the location of each type_id, most verifications
123 * cannot be done. e.g. an earlier btf_type may refer to a later
124 * btf_type (recall the "const void *" above), so we cannot
125 * check this type-reference in the first pass.
127 * In the first pass, it still does some verifications (e.g.
128 * checking the name is a valid offset to the string section).
132 * The main focus is to resolve a btf_type that is referring
135 * We have to ensure the referring type:
136 * 1) does exist in the BTF (i.e. in btf->types[])
137 * 2) does not cause a loop:
146 * btf_type_needs_resolve() decides if a btf_type needs
149 * The needs_resolve type implements the "resolve()" ops which
150 * essentially does a DFS and detects backedge.
152 * During resolve (or DFS), different C types have different
153 * "RESOLVED" conditions.
155 * When resolving a BTF_KIND_STRUCT, we need to resolve all its
156 * members because a member is always referring to another
157 * type. A struct's member can be treated as "RESOLVED" if
158 * it is referring to a BTF_KIND_PTR. Otherwise, the
159 * following valid C struct would be rejected:
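 *
 *	(e.g., illustrating the above, a struct that points to itself)
 *	struct A {
 *		const struct A *a;
 *	};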
166 * When resolving a BTF_KIND_PTR, it needs to keep resolving if
167 * it is referring to another BTF_KIND_PTR. Otherwise, we cannot
168 * detect a pointer loop, e.g.:
169 * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR +
171 * +-----------------------------------------+
175 #define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
176 #define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
177 #define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
178 #define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
179 #define BITS_ROUNDUP_BYTES(bits) \
180 (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
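
/* A quick worked example of the macros above: for bits = 13,
 * BITS_PER_BYTE_MASKED(13) = 5, BITS_ROUNDDOWN_BYTES(13) = 1 and
 * BITS_ROUNDUP_BYTES(13) = 1 + !!5 = 2, i.e. 13 bits need 2 bytes.
 */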
182 #define BTF_INFO_MASK 0x9f00ffff
183 #define BTF_INT_MASK 0x0fffffff
184 #define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
185 #define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
187 /* 16MB for 64k structs and each has 16 members and
188 * a few MB spaces for the string section.
189 * The hard limit is S32_MAX.
191 #define BTF_MAX_SIZE (16 * 1024 * 1024)
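
/* Rough arithmetic behind the default above (illustrative, assuming
 * 12-byte 'struct btf_type' and 'struct btf_member' records):
 * 64k structs with 16 members each take about
 * 65536 * (12 + 16 * 12) bytes ~= 13MB, leaving a few MB of the
 * 16MB budget for the string section.
 */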
#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)				\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);					\
	     i++, member++)

DEFINE_SPINLOCK(btf_idr_lock);

enum btf_kfunc_hook {
	BTF_KFUNC_HOOK_COMMON,
	BTF_KFUNC_HOOK_STRUCT_OPS,
	BTF_KFUNC_HOOK_TRACING,
	BTF_KFUNC_HOOK_SYSCALL,
	BTF_KFUNC_HOOK_FMODRET,
	BTF_KFUNC_HOOK_CGROUP_SKB,
	BTF_KFUNC_HOOK_SCHED_ACT,
	BTF_KFUNC_HOOK_SK_SKB,
	BTF_KFUNC_HOOK_SOCKET_FILTER,
	BTF_KFUNC_HOOK_NETFILTER,
	BTF_KFUNC_HOOK_MAX,
};

enum {
	BTF_KFUNC_SET_MAX_CNT = 256,
	BTF_DTOR_KFUNC_MAX_CNT = 256,
	BTF_KFUNC_FILTER_MAX_CNT = 16,
};

struct btf_kfunc_hook_filter {
	btf_kfunc_filter_t filters[BTF_KFUNC_FILTER_MAX_CNT];
};

struct btf_kfunc_set_tab {
	struct btf_id_set8 *sets[BTF_KFUNC_HOOK_MAX];
	struct btf_kfunc_hook_filter hook_filters[BTF_KFUNC_HOOK_MAX];
};

struct btf_id_dtor_kfunc_tab {
	struct btf_id_dtor_kfunc dtors[];
};

struct btf {
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	struct btf_header hdr;
	u32 nr_types; /* includes VOID for base BTF */
	u32 types_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
	struct btf_kfunc_set_tab *kfunc_set_tab;
	struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab;
	struct btf_struct_metas *struct_meta_tab;

	/* split BTF support */
	struct btf *base_btf;
	u32 start_id; /* first type ID in this BTF (0 for base BTF) */
	u32 start_str_off; /* first string offset (0 for base BTF) */
	char name[MODULE_NAME_LEN];
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum resolve_mode {
	RESOLVE_TBD,	/* To Be Determined */
	RESOLVE_PTR,	/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union or array */
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
	[BTF_KIND_FLOAT]	= "FLOAT",
	[BTF_KIND_DECL_TAG]	= "DECL_TAG",
	[BTF_KIND_TYPE_TAG]	= "TYPE_TAG",
	[BTF_KIND_ENUM64]	= "ENUM64",
};

const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

341 /* Chunk size we use in safe copy of data to be shown. */
342 #define BTF_SHOW_OBJ_SAFE_SIZE 32
 * This is the maximum size of a base type value (equivalent to a
 * 128-bit int); if we are at the end of our safe buffer and have
 * less than 16 bytes of space we can't be assured of being able
 * to copy the next type safely, so in such cases we will initiate
 * a new copy.
351 #define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
354 #define BTF_SHOW_NAME_SIZE 80
357 * The suffix of a type that indicates it cannot alias another type when
358 * comparing BTF IDs for kfunc invocations.
360 #define NOCAST_ALIAS_SUFFIX "___init"
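
/* For example (hypothetical type names): a kfunc argument declared as
 * "struct foo___init *" is only satisfied by that exact suffixed type;
 * it is not treated as interchangeable with plain "struct foo" when
 * BTF IDs are compared.
 */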
363 * Common data to all BTF show operations. Private show functions can add
364 * their own data to a structure containing a struct btf_show and consult it
365 * in the show callback. See btf_type_show() below.
367 * One challenge with showing nested data is we want to skip 0-valued
368 * data, but in order to figure out whether a nested object is all zeros
369 * we need to walk through it. As a result, we need to make two passes
 * when handling structs, unions and arrays; the first pass simply looks
371 * for nonzero data, while the second actually does the display. The first
372 * pass is signalled by show->state.depth_check being set, and if we
373 * encounter a non-zero value we set show->state.depth_to_show to
374 * the depth at which we encountered it. When we have completed the
375 * first pass, we will know if anything needs to be displayed if
376 * depth_to_show > depth. See btf_[struct,array]_show() for the
377 * implementation of this.
379 * Another problem is we want to ensure the data for display is safe to
380 * access. To support this, the anonymous "struct {} obj" tracks the data
381 * object and our safe copy of it. We copy portions of the data needed
382 * to the object "copy" buffer, but because its size is limited to
383 * BTF_SHOW_OBJ_COPY_LEN bytes, multiple copies may be required as we
384 * traverse larger objects for display.
386 * The various data type show functions all start with a call to
387 * btf_show_start_type() which returns a pointer to the safe copy
388 * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
389 * raw data itself). btf_show_obj_safe() is responsible for
390 * using copy_from_kernel_nofault() to update the safe data if necessary
 * as we traverse the object's data. skbuff-like semantics are used:
394 * - obj.head points to the start of the toplevel object for display
395 * - obj.size is the size of the toplevel object
396 * - obj.data points to the current point in the original data at
397 * which our safe data starts. obj.data will advance as we copy
398 * portions of the data.
400 * In most cases a single copy will suffice, but larger data structures
401 * such as "struct task_struct" will require many copies. The logic in
 * btf_show_obj_safe() determines if a new
403 * copy_from_kernel_nofault() is needed.
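 *
 * A small illustration (sizes hypothetical): for an 8192-byte toplevel
 * struct starting at address A, obj.head = A and obj.size = 8192. As
 * members are shown, obj.data may advance to, say, A + 64, at which
 * point obj.safe mirrors the BTF_SHOW_OBJ_SAFE_SIZE bytes starting at
 * A + 64.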
407 void *target
; /* target of show operation (seq file, buffer) */
408 void (*showfn
)(struct btf_show
*show
, const char *fmt
, va_list args
);
409 const struct btf
*btf
;
410 /* below are used during iteration */
419 int status
; /* non-zero for error */
420 const struct btf_type
*type
;
421 const struct btf_member
*member
;
422 char name
[BTF_SHOW_NAME_SIZE
]; /* space for member name/type */
428 u8 safe
[BTF_SHOW_OBJ_SAFE_SIZE
];
432 struct btf_kind_operations
{
433 s32 (*check_meta
)(struct btf_verifier_env
*env
,
434 const struct btf_type
*t
,
436 int (*resolve
)(struct btf_verifier_env
*env
,
437 const struct resolve_vertex
*v
);
438 int (*check_member
)(struct btf_verifier_env
*env
,
439 const struct btf_type
*struct_type
,
440 const struct btf_member
*member
,
441 const struct btf_type
*member_type
);
442 int (*check_kflag_member
)(struct btf_verifier_env
*env
,
443 const struct btf_type
*struct_type
,
444 const struct btf_member
*member
,
445 const struct btf_type
*member_type
);
446 void (*log_details
)(struct btf_verifier_env
*env
,
447 const struct btf_type
*t
);
448 void (*show
)(const struct btf
*btf
, const struct btf_type
*t
,
449 u32 type_id
, void *data
, u8 bits_offsets
,
450 struct btf_show
*show
);
453 static const struct btf_kind_operations
* const kind_ops
[NR_BTF_KINDS
];
454 static struct btf_type btf_void
;
456 static int btf_resolve(struct btf_verifier_env
*env
,
457 const struct btf_type
*t
, u32 type_id
);
459 static int btf_func_check(struct btf_verifier_env
*env
,
460 const struct btf_type
*t
);
static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of them are not strictly C modifiers,
	 * but they are grouped into the same bucket here:
	 * a type (t) that refers to another
	 * type through t->type AND whose size cannot
	 * be determined without following the t->type.
	 *
	 * ptr does not fall into this bucket
	 * because its size is always sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
	case BTF_KIND_TYPE_TAG:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

static bool btf_type_is_decl_tag(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DECL_TAG;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t) ||
	       btf_type_is_decl_tag(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool btf_type_is_decl_tag_target(const struct btf_type *t)
{
	return btf_type_is_func(t) || btf_type_is_struct(t) ||
	       btf_type_is_var(t) || btf_type_is_typedef(t);
}

524 u32
btf_nr_types(const struct btf
*btf
)
529 total
+= btf
->nr_types
;
536 s32
btf_find_by_name_kind(const struct btf
*btf
, const char *name
, u8 kind
)
538 const struct btf_type
*t
;
542 total
= btf_nr_types(btf
);
543 for (i
= 1; i
< total
; i
++) {
544 t
= btf_type_by_id(btf
, i
);
545 if (BTF_INFO_KIND(t
->info
) != kind
)
548 tname
= btf_name_by_offset(btf
, t
->name_off
);
549 if (!strcmp(tname
, name
))
556 s32
bpf_find_btf_id(const char *name
, u32 kind
, struct btf
**btf_p
)
562 btf
= bpf_get_btf_vmlinux();
568 ret
= btf_find_by_name_kind(btf
, name
, kind
);
569 /* ret is never zero, since btf_find_by_name_kind returns
570 * positive btf_id or negative error.
578 /* If name is not found in vmlinux's BTF then search in module's BTFs */
579 spin_lock_bh(&btf_idr_lock
);
580 idr_for_each_entry(&btf_idr
, btf
, id
) {
581 if (!btf_is_module(btf
))
		/* linear search could be slow, hence unlock/lock
		 * the IDR to avoid holding it for too long
587 spin_unlock_bh(&btf_idr_lock
);
588 ret
= btf_find_by_name_kind(btf
, name
, kind
);
594 spin_lock_bh(&btf_idr_lock
);
596 spin_unlock_bh(&btf_idr_lock
);
600 const struct btf_type
*btf_type_skip_modifiers(const struct btf
*btf
,
603 const struct btf_type
*t
= btf_type_by_id(btf
, id
);
605 while (btf_type_is_modifier(t
)) {
607 t
= btf_type_by_id(btf
, t
->type
);
616 const struct btf_type
*btf_type_resolve_ptr(const struct btf
*btf
,
619 const struct btf_type
*t
;
621 t
= btf_type_skip_modifiers(btf
, id
, NULL
);
622 if (!btf_type_is_ptr(t
))
625 return btf_type_skip_modifiers(btf
, t
->type
, res_id
);
628 const struct btf_type
*btf_type_resolve_func_ptr(const struct btf
*btf
,
631 const struct btf_type
*ptype
;
633 ptype
= btf_type_resolve_ptr(btf
, id
, res_id
);
634 if (ptype
&& btf_type_is_func_proto(ptype
))
640 /* Types that act only as a source, not sink or intermediate
641 * type when resolving.
643 static bool btf_type_is_resolve_source_only(const struct btf_type
*t
)
645 return btf_type_is_var(t
) ||
646 btf_type_is_decl_tag(t
) ||
647 btf_type_is_datasec(t
);
650 /* What types need to be resolved?
652 * btf_type_is_modifier() is an obvious one.
654 * btf_type_is_struct() because its member refers to
655 * another type (through member->type).
657 * btf_type_is_var() because the variable refers to
658 * another type. btf_type_is_datasec() holds multiple
659 * btf_type_is_var() types that need resolving.
661 * btf_type_is_array() because its element (array->type)
 * refers to another type. An array can be thought of as a
 * special case of a struct in which the same member-type is
 * repeated array->nelems times.
666 static bool btf_type_needs_resolve(const struct btf_type
*t
)
668 return btf_type_is_modifier(t
) ||
669 btf_type_is_ptr(t
) ||
670 btf_type_is_struct(t
) ||
671 btf_type_is_array(t
) ||
672 btf_type_is_var(t
) ||
673 btf_type_is_func(t
) ||
674 btf_type_is_decl_tag(t
) ||
675 btf_type_is_datasec(t
);
678 /* t->size can be used */
679 static bool btf_type_has_size(const struct btf_type
*t
)
681 switch (BTF_INFO_KIND(t
->info
)) {
683 case BTF_KIND_STRUCT
:
686 case BTF_KIND_DATASEC
:
688 case BTF_KIND_ENUM64
:
695 static const char *btf_int_encoding_str(u8 encoding
)
699 else if (encoding
== BTF_INT_SIGNED
)
701 else if (encoding
== BTF_INT_CHAR
)
703 else if (encoding
== BTF_INT_BOOL
)
709 static u32
btf_type_int(const struct btf_type
*t
)
711 return *(u32
*)(t
+ 1);
714 static const struct btf_array
*btf_type_array(const struct btf_type
*t
)
716 return (const struct btf_array
*)(t
+ 1);
719 static const struct btf_enum
*btf_type_enum(const struct btf_type
*t
)
721 return (const struct btf_enum
*)(t
+ 1);
724 static const struct btf_var
*btf_type_var(const struct btf_type
*t
)
726 return (const struct btf_var
*)(t
+ 1);
729 static const struct btf_decl_tag
*btf_type_decl_tag(const struct btf_type
*t
)
731 return (const struct btf_decl_tag
*)(t
+ 1);
734 static const struct btf_enum64
*btf_type_enum64(const struct btf_type
*t
)
736 return (const struct btf_enum64
*)(t
+ 1);
739 static const struct btf_kind_operations
*btf_type_ops(const struct btf_type
*t
)
741 return kind_ops
[BTF_INFO_KIND(t
->info
)];
744 static bool btf_name_offset_valid(const struct btf
*btf
, u32 offset
)
746 if (!BTF_STR_OFFSET_VALID(offset
))
749 while (offset
< btf
->start_str_off
)
752 offset
-= btf
->start_str_off
;
753 return offset
< btf
->hdr
.str_len
;
756 static bool __btf_name_char_ok(char c
, bool first
)
758 if ((first
? !isalpha(c
) :
766 static const char *btf_str_by_offset(const struct btf
*btf
, u32 offset
)
768 while (offset
< btf
->start_str_off
)
771 offset
-= btf
->start_str_off
;
772 if (offset
< btf
->hdr
.str_len
)
773 return &btf
->strings
[offset
];
778 static bool __btf_name_valid(const struct btf
*btf
, u32 offset
)
780 /* offset must be valid */
781 const char *src
= btf_str_by_offset(btf
, offset
);
782 const char *src_limit
;
784 if (!__btf_name_char_ok(*src
, true))
787 /* set a limit on identifier length */
788 src_limit
= src
+ KSYM_NAME_LEN
;
790 while (*src
&& src
< src_limit
) {
791 if (!__btf_name_char_ok(*src
, false))
799 static bool btf_name_valid_identifier(const struct btf
*btf
, u32 offset
)
801 return __btf_name_valid(btf
, offset
);
804 static bool btf_name_valid_section(const struct btf
*btf
, u32 offset
)
806 return __btf_name_valid(btf
, offset
);
809 static const char *__btf_name_by_offset(const struct btf
*btf
, u32 offset
)
816 name
= btf_str_by_offset(btf
, offset
);
817 return name
?: "(invalid-name-offset)";
820 const char *btf_name_by_offset(const struct btf
*btf
, u32 offset
)
822 return btf_str_by_offset(btf
, offset
);
825 const struct btf_type
*btf_type_by_id(const struct btf
*btf
, u32 type_id
)
827 while (type_id
< btf
->start_id
)
830 type_id
-= btf
->start_id
;
831 if (type_id
>= btf
->nr_types
)
833 return btf
->types
[type_id
];
835 EXPORT_SYMBOL_GPL(btf_type_by_id
);
838 * Regular int is not a bit field and it must be either
839 * u8/u16/u32/u64 or __int128.
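 *
 * For example (illustrative): a 12-bit int, or an int whose
 * BTF_INT_OFFSET() is non-zero, is not a regular int.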
841 static bool btf_type_int_is_regular(const struct btf_type
*t
)
843 u8 nr_bits
, nr_bytes
;
846 int_data
= btf_type_int(t
);
847 nr_bits
= BTF_INT_BITS(int_data
);
848 nr_bytes
= BITS_ROUNDUP_BYTES(nr_bits
);
849 if (BITS_PER_BYTE_MASKED(nr_bits
) ||
850 BTF_INT_OFFSET(int_data
) ||
851 (nr_bytes
!= sizeof(u8
) && nr_bytes
!= sizeof(u16
) &&
852 nr_bytes
!= sizeof(u32
) && nr_bytes
!= sizeof(u64
) &&
853 nr_bytes
!= (2 * sizeof(u64
)))) {
 * Check that the given struct member is a regular int with the
 * expected offset and size.
864 bool btf_member_is_reg_int(const struct btf
*btf
, const struct btf_type
*s
,
865 const struct btf_member
*m
,
866 u32 expected_offset
, u32 expected_size
)
868 const struct btf_type
*t
;
873 t
= btf_type_id_size(btf
, &id
, NULL
);
874 if (!t
|| !btf_type_is_int(t
))
877 int_data
= btf_type_int(t
);
878 nr_bits
= BTF_INT_BITS(int_data
);
879 if (btf_type_kflag(s
)) {
880 u32 bitfield_size
= BTF_MEMBER_BITFIELD_SIZE(m
->offset
);
881 u32 bit_offset
= BTF_MEMBER_BIT_OFFSET(m
->offset
);
883 /* if kflag set, int should be a regular int and
884 * bit offset should be at byte boundary.
886 return !bitfield_size
&&
887 BITS_ROUNDUP_BYTES(bit_offset
) == expected_offset
&&
888 BITS_ROUNDUP_BYTES(nr_bits
) == expected_size
;
891 if (BTF_INT_OFFSET(int_data
) ||
892 BITS_PER_BYTE_MASKED(m
->offset
) ||
893 BITS_ROUNDUP_BYTES(m
->offset
) != expected_offset
||
894 BITS_PER_BYTE_MASKED(nr_bits
) ||
895 BITS_ROUNDUP_BYTES(nr_bits
) != expected_size
)
901 /* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
902 static const struct btf_type
*btf_type_skip_qualifiers(const struct btf
*btf
,
905 const struct btf_type
*t
= btf_type_by_id(btf
, id
);
907 while (btf_type_is_modifier(t
) &&
908 BTF_INFO_KIND(t
->info
) != BTF_KIND_TYPEDEF
) {
909 t
= btf_type_by_id(btf
, t
->type
);
915 #define BTF_SHOW_MAX_ITER 10
917 #define BTF_KIND_BIT(kind) (1ULL << kind)
920 * Populate show->state.name with type name information.
921 * Format of type name is
923 * [.member_name = ] (type_name)
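 *
 * e.g. (illustrative) ".name = (char[])" for a struct member called
 * "name" that is a char array, or "(struct foo *)" for an anonymous
 * pointer-to-struct value.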
925 static const char *btf_show_name(struct btf_show
*show
)
927 /* BTF_MAX_ITER array suffixes "[]" */
928 const char *array_suffixes
= "[][][][][][][][][][]";
929 const char *array_suffix
= &array_suffixes
[strlen(array_suffixes
)];
930 /* BTF_MAX_ITER pointer suffixes "*" */
931 const char *ptr_suffixes
= "**********";
932 const char *ptr_suffix
= &ptr_suffixes
[strlen(ptr_suffixes
)];
933 const char *name
= NULL
, *prefix
= "", *parens
= "";
934 const struct btf_member
*m
= show
->state
.member
;
935 const struct btf_type
*t
;
936 const struct btf_array
*array
;
937 u32 id
= show
->state
.type_id
;
938 const char *member
= NULL
;
939 bool show_member
= false;
943 show
->state
.name
[0] = '\0';
946 * Don't show type name if we're showing an array member;
 * in that case we show the array type so we don't need to repeat
948 * ourselves for each member.
950 if (show
->state
.array_member
)
953 /* Retrieve member name, if any. */
955 member
= btf_name_by_offset(show
->btf
, m
->name_off
);
956 show_member
= strlen(member
) > 0;
961 * Start with type_id, as we have resolved the struct btf_type *
962 * via btf_modifier_show() past the parent typedef to the child
963 * struct, int etc it is defined as. In such cases, the type_id
964 * still represents the starting type while the struct btf_type *
965 * in our show->state points at the resolved type of the typedef.
967 t
= btf_type_by_id(show
->btf
, id
);
972 * The goal here is to build up the right number of pointer and
973 * array suffixes while ensuring the type name for a typedef
974 * is represented. Along the way we accumulate a list of
975 * BTF kinds we have encountered, since these will inform later
976 * display; for example, pointer types will not require an
977 * opening "{" for struct, we will just display the pointer value.
979 * We also want to accumulate the right number of pointer or array
980 * indices in the format string while iterating until we get to
981 * the typedef/pointee/array member target type.
983 * We start by pointing at the end of pointer and array suffix
984 * strings; as we accumulate pointers and arrays we move the pointer
985 * or array string backwards so it will show the expected number of
986 * '*' or '[]' for the type. BTF_SHOW_MAX_ITER of nesting of pointers
987 * and/or arrays and typedefs are supported as a precaution.
989 * We also want to get typedef name while proceeding to resolve
990 * type it points to so that we can add parentheses if it is a
991 * "typedef struct" etc.
993 for (i
= 0; i
< BTF_SHOW_MAX_ITER
; i
++) {
995 switch (BTF_INFO_KIND(t
->info
)) {
996 case BTF_KIND_TYPEDEF
:
998 name
= btf_name_by_offset(show
->btf
,
1000 kinds
|= BTF_KIND_BIT(BTF_KIND_TYPEDEF
);
1003 case BTF_KIND_ARRAY
:
1004 kinds
|= BTF_KIND_BIT(BTF_KIND_ARRAY
);
1008 array
= btf_type_array(t
);
1009 if (array_suffix
> array_suffixes
)
1014 kinds
|= BTF_KIND_BIT(BTF_KIND_PTR
);
1015 if (ptr_suffix
> ptr_suffixes
)
1025 t
= btf_type_skip_qualifiers(show
->btf
, id
);
1027 /* We may not be able to represent this type; bail to be safe */
1028 if (i
== BTF_SHOW_MAX_ITER
)
1032 name
= btf_name_by_offset(show
->btf
, t
->name_off
);
1034 switch (BTF_INFO_KIND(t
->info
)) {
1035 case BTF_KIND_STRUCT
:
1036 case BTF_KIND_UNION
:
1037 prefix
= BTF_INFO_KIND(t
->info
) == BTF_KIND_STRUCT
?
1039 /* if it's an array of struct/union, parens is already set */
1040 if (!(kinds
& (BTF_KIND_BIT(BTF_KIND_ARRAY
))))
1044 case BTF_KIND_ENUM64
:
1051 /* pointer does not require parens */
1052 if (kinds
& BTF_KIND_BIT(BTF_KIND_PTR
))
1054 /* typedef does not require struct/union/enum prefix */
1055 if (kinds
& BTF_KIND_BIT(BTF_KIND_TYPEDEF
))
1061 /* Even if we don't want type name info, we want parentheses etc */
1062 if (show
->flags
& BTF_SHOW_NONAME
)
1063 snprintf(show
->state
.name
, sizeof(show
->state
.name
), "%s",
1066 snprintf(show
->state
.name
, sizeof(show
->state
.name
),
1067 "%s%s%s(%s%s%s%s%s%s)%s",
1068 /* first 3 strings comprise ".member = " */
1069 show_member
? "." : "",
1070 show_member
? member
: "",
1071 show_member
? " = " : "",
1072 /* ...next is our prefix (struct, enum, etc) */
1074 strlen(prefix
) > 0 && strlen(name
) > 0 ? " " : "",
1075 /* ...this is the type name itself */
1077 /* ...suffixed by the appropriate '*', '[]' suffixes */
1078 strlen(ptr_suffix
) > 0 ? " " : "", ptr_suffix
,
1079 array_suffix
, parens
);
1081 return show
->state
.name
;
1084 static const char *__btf_show_indent(struct btf_show
*show
)
1086 const char *indents
= " ";
1087 const char *indent
= &indents
[strlen(indents
)];
1089 if ((indent
- show
->state
.depth
) >= indents
)
1090 return indent
- show
->state
.depth
;
1094 static const char *btf_show_indent(struct btf_show
*show
)
1096 return show
->flags
& BTF_SHOW_COMPACT
? "" : __btf_show_indent(show
);
1099 static const char *btf_show_newline(struct btf_show
*show
)
1101 return show
->flags
& BTF_SHOW_COMPACT
? "" : "\n";
1104 static const char *btf_show_delim(struct btf_show
*show
)
1106 if (show
->state
.depth
== 0)
1109 if ((show
->flags
& BTF_SHOW_COMPACT
) && show
->state
.type
&&
1110 BTF_INFO_KIND(show
->state
.type
->info
) == BTF_KIND_UNION
)
1116 __printf(2, 3) static void btf_show(struct btf_show
*show
, const char *fmt
, ...)
1120 if (!show
->state
.depth_check
) {
1121 va_start(args
, fmt
);
1122 show
->showfn(show
, fmt
, args
);
1127 /* Macros are used here as btf_show_type_value[s]() prepends and appends
1128 * format specifiers to the format specifier passed in; these do the work of
1129 * adding indentation, delimiters etc while the caller simply has to specify
1130 * the type value(s) in the format specifier + value(s).
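 *
 * Illustrative effect of the macros below: for an int member "x" with
 * value 3 at depth 1, btf_show_type_value(show, "%d", 3) emits
 * something like ".x = 3," (indentation, delimiter and newline depend
 * on the show flags), and emits nothing if the value is 0 unless
 * BTF_SHOW_ZERO is set or we are at depth 0.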
1132 #define btf_show_type_value(show, fmt, value) \
1134 if ((value) != (__typeof__(value))0 || \
1135 (show->flags & BTF_SHOW_ZERO) || \
1136 show->state.depth == 0) { \
1137 btf_show(show, "%s%s" fmt "%s%s", \
1138 btf_show_indent(show), \
1139 btf_show_name(show), \
1140 value, btf_show_delim(show), \
1141 btf_show_newline(show)); \
1142 if (show->state.depth > show->state.depth_to_show) \
1143 show->state.depth_to_show = show->state.depth; \
1147 #define btf_show_type_values(show, fmt, ...) \
1149 btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
1150 btf_show_name(show), \
1151 __VA_ARGS__, btf_show_delim(show), \
1152 btf_show_newline(show)); \
1153 if (show->state.depth > show->state.depth_to_show) \
1154 show->state.depth_to_show = show->state.depth; \
1157 /* How much is left to copy to safe buffer after @data? */
1158 static int btf_show_obj_size_left(struct btf_show
*show
, void *data
)
1160 return show
->obj
.head
+ show
->obj
.size
- data
;
1163 /* Is object pointed to by @data of @size already copied to our safe buffer? */
1164 static bool btf_show_obj_is_safe(struct btf_show
*show
, void *data
, int size
)
1166 return data
>= show
->obj
.data
&&
1167 (data
+ size
) < (show
->obj
.data
+ BTF_SHOW_OBJ_SAFE_SIZE
);
1171 * If object pointed to by @data of @size falls within our safe buffer, return
1172 * the equivalent pointer to the same safe data. Assumes
1173 * copy_from_kernel_nofault() has already happened and our safe buffer is
1176 static void *__btf_show_obj_safe(struct btf_show
*show
, void *data
, int size
)
1178 if (btf_show_obj_is_safe(show
, data
, size
))
1179 return show
->obj
.safe
+ (data
- show
->obj
.data
);
1184 * Return a safe-to-access version of data pointed to by @data.
1185 * We do this by copying the relevant amount of information
1186 * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
1188 * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
1189 * safe copy is needed.
1191 * Otherwise we need to determine if we have the required amount
1192 * of data (determined by the @data pointer and the size of the
1193 * largest base type we can encounter (represented by
1194 * BTF_SHOW_OBJ_BASE_TYPE_SIZE). Having that much data ensures
1195 * that we will be able to print some of the current object,
1196 * and if more is needed a copy will be triggered.
1197 * Some objects such as structs will not fit into the buffer;
1198 * in such cases additional copies when we iterate over their
1199 * members may be needed.
1201 * btf_show_obj_safe() is used to return a safe buffer for
1202 * btf_show_start_type(); this ensures that as we recurse into
1203 * nested types we always have safe data for the given type.
1204 * This approach is somewhat wasteful; it's possible for example
1205 * that when iterating over a large union we'll end up copying the
1206 * same data repeatedly, but the goal is safety not performance.
1207 * We use stack data as opposed to per-CPU buffers because the
1208 * iteration over a type can take some time, and preemption handling
1209 * would greatly complicate use of the safe buffer.
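 *
 * Concretely (illustrative): with a 32-byte safe buffer, showing an 8k
 * task_struct means copy_from_kernel_nofault() runs many times, each
 * call refreshing the safe window around the member currently being
 * shown.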
1211 static void *btf_show_obj_safe(struct btf_show
*show
,
1212 const struct btf_type
*t
,
1215 const struct btf_type
*rt
;
1216 int size_left
, size
;
1219 if (show
->flags
& BTF_SHOW_UNSAFE
)
1222 rt
= btf_resolve_size(show
->btf
, t
, &size
);
1224 show
->state
.status
= PTR_ERR(rt
);
1229 * Is this toplevel object? If so, set total object size and
1230 * initialize pointers. Otherwise check if we still fall within
1231 * our safe object data.
1233 if (show
->state
.depth
== 0) {
1234 show
->obj
.size
= size
;
1235 show
->obj
.head
= data
;
1238 * If the size of the current object is > our remaining
1239 * safe buffer we _may_ need to do a new copy. However
	 * consider the case of a nested struct; its size pushes
1241 * us over the safe buffer limit, but showing any individual
1242 * struct members does not. In such cases, we don't need
1243 * to initiate a fresh copy yet; however we definitely need
1244 * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
1245 * in our buffer, regardless of the current object size.
1246 * The logic here is that as we resolve types we will
1247 * hit a base type at some point, and we need to be sure
1248 * the next chunk of data is safely available to display
1249 * that type info safely. We cannot rely on the size of
1250 * the current object here because it may be much larger
1251 * than our current buffer (e.g. task_struct is 8k).
1252 * All we want to do here is ensure that we can print the
1253 * next basic type, which we can if either
1254 * - the current type size is within the safe buffer; or
1255 * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
1258 safe
= __btf_show_obj_safe(show
, data
,
1260 BTF_SHOW_OBJ_BASE_TYPE_SIZE
));
1264 * We need a new copy to our safe object, either because we haven't
1265 * yet copied and are initializing safe data, or because the data
1266 * we want falls outside the boundaries of the safe object.
1269 size_left
= btf_show_obj_size_left(show
, data
);
1270 if (size_left
> BTF_SHOW_OBJ_SAFE_SIZE
)
1271 size_left
= BTF_SHOW_OBJ_SAFE_SIZE
;
1272 show
->state
.status
= copy_from_kernel_nofault(show
->obj
.safe
,
1274 if (!show
->state
.status
) {
1275 show
->obj
.data
= data
;
1276 safe
= show
->obj
.safe
;
1284 * Set the type we are starting to show and return a safe data pointer
1285 * to be used for showing the associated data.
1287 static void *btf_show_start_type(struct btf_show
*show
,
1288 const struct btf_type
*t
,
1289 u32 type_id
, void *data
)
1291 show
->state
.type
= t
;
1292 show
->state
.type_id
= type_id
;
1293 show
->state
.name
[0] = '\0';
1295 return btf_show_obj_safe(show
, t
, data
);
1298 static void btf_show_end_type(struct btf_show
*show
)
1300 show
->state
.type
= NULL
;
1301 show
->state
.type_id
= 0;
1302 show
->state
.name
[0] = '\0';
1305 static void *btf_show_start_aggr_type(struct btf_show
*show
,
1306 const struct btf_type
*t
,
1307 u32 type_id
, void *data
)
1309 void *safe_data
= btf_show_start_type(show
, t
, type_id
, data
);
1314 btf_show(show
, "%s%s%s", btf_show_indent(show
),
1315 btf_show_name(show
),
1316 btf_show_newline(show
));
1317 show
->state
.depth
++;
1321 static void btf_show_end_aggr_type(struct btf_show
*show
,
1324 show
->state
.depth
--;
1325 btf_show(show
, "%s%s%s%s", btf_show_indent(show
), suffix
,
1326 btf_show_delim(show
), btf_show_newline(show
));
1327 btf_show_end_type(show
);
1330 static void btf_show_start_member(struct btf_show
*show
,
1331 const struct btf_member
*m
)
1333 show
->state
.member
= m
;
1336 static void btf_show_start_array_member(struct btf_show
*show
)
1338 show
->state
.array_member
= 1;
1339 btf_show_start_member(show
, NULL
);
1342 static void btf_show_end_member(struct btf_show
*show
)
1344 show
->state
.member
= NULL
;
1347 static void btf_show_end_array_member(struct btf_show
*show
)
1349 show
->state
.array_member
= 0;
1350 btf_show_end_member(show
);
1353 static void *btf_show_start_array_type(struct btf_show
*show
,
1354 const struct btf_type
*t
,
1359 show
->state
.array_encoding
= array_encoding
;
1360 show
->state
.array_terminated
= 0;
1361 return btf_show_start_aggr_type(show
, t
, type_id
, data
);
1364 static void btf_show_end_array_type(struct btf_show
*show
)
1366 show
->state
.array_encoding
= 0;
1367 show
->state
.array_terminated
= 0;
1368 btf_show_end_aggr_type(show
, "]");
1371 static void *btf_show_start_struct_type(struct btf_show
*show
,
1372 const struct btf_type
*t
,
1376 return btf_show_start_aggr_type(show
, t
, type_id
, data
);
1379 static void btf_show_end_struct_type(struct btf_show
*show
)
1381 btf_show_end_aggr_type(show
, "}");
1384 __printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log
*log
,
1385 const char *fmt
, ...)
1389 va_start(args
, fmt
);
1390 bpf_verifier_vlog(log
, fmt
, args
);
1394 __printf(2, 3) static void btf_verifier_log(struct btf_verifier_env
*env
,
1395 const char *fmt
, ...)
1397 struct bpf_verifier_log
*log
= &env
->log
;
1400 if (!bpf_verifier_log_needed(log
))
1403 va_start(args
, fmt
);
1404 bpf_verifier_vlog(log
, fmt
, args
);
1408 __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env
*env
,
1409 const struct btf_type
*t
,
1411 const char *fmt
, ...)
1413 struct bpf_verifier_log
*log
= &env
->log
;
1414 struct btf
*btf
= env
->btf
;
1417 if (!bpf_verifier_log_needed(log
))
1420 if (log
->level
== BPF_LOG_KERNEL
) {
1421 /* btf verifier prints all types it is processing via
1422 * btf_verifier_log_type(..., fmt = NULL).
1423 * Skip those prints for in-kernel BTF verification.
1428 /* Skip logging when loading module BTF with mismatches permitted */
1429 if (env
->btf
->base_btf
&& IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH
))
1433 __btf_verifier_log(log
, "[%u] %s %s%s",
1436 __btf_name_by_offset(btf
, t
->name_off
),
1437 log_details
? " " : "");
1440 btf_type_ops(t
)->log_details(env
, t
);
1443 __btf_verifier_log(log
, " ");
1444 va_start(args
, fmt
);
1445 bpf_verifier_vlog(log
, fmt
, args
);
1449 __btf_verifier_log(log
, "\n");
1452 #define btf_verifier_log_type(env, t, ...) \
1453 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
1454 #define btf_verifier_log_basic(env, t, ...) \
1455 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
1458 static void btf_verifier_log_member(struct btf_verifier_env
*env
,
1459 const struct btf_type
*struct_type
,
1460 const struct btf_member
*member
,
1461 const char *fmt
, ...)
1463 struct bpf_verifier_log
*log
= &env
->log
;
1464 struct btf
*btf
= env
->btf
;
1467 if (!bpf_verifier_log_needed(log
))
1470 if (log
->level
== BPF_LOG_KERNEL
) {
1474 /* Skip logging when loading module BTF with mismatches permitted */
1475 if (env
->btf
->base_btf
&& IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH
))
1479 /* The CHECK_META phase already did a btf dump.
1481 * If member is logged again, it must hit an error in
1482 * parsing this member. It is useful to print out which
1483 * struct this member belongs to.
1485 if (env
->phase
!= CHECK_META
)
1486 btf_verifier_log_type(env
, struct_type
, NULL
);
1488 if (btf_type_kflag(struct_type
))
1489 __btf_verifier_log(log
,
1490 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
1491 __btf_name_by_offset(btf
, member
->name_off
),
1493 BTF_MEMBER_BITFIELD_SIZE(member
->offset
),
1494 BTF_MEMBER_BIT_OFFSET(member
->offset
));
1496 __btf_verifier_log(log
, "\t%s type_id=%u bits_offset=%u",
1497 __btf_name_by_offset(btf
, member
->name_off
),
1498 member
->type
, member
->offset
);
1501 __btf_verifier_log(log
, " ");
1502 va_start(args
, fmt
);
1503 bpf_verifier_vlog(log
, fmt
, args
);
1507 __btf_verifier_log(log
, "\n");
1511 static void btf_verifier_log_vsi(struct btf_verifier_env
*env
,
1512 const struct btf_type
*datasec_type
,
1513 const struct btf_var_secinfo
*vsi
,
1514 const char *fmt
, ...)
1516 struct bpf_verifier_log
*log
= &env
->log
;
1519 if (!bpf_verifier_log_needed(log
))
1521 if (log
->level
== BPF_LOG_KERNEL
&& !fmt
)
1523 if (env
->phase
!= CHECK_META
)
1524 btf_verifier_log_type(env
, datasec_type
, NULL
);
1526 __btf_verifier_log(log
, "\t type_id=%u offset=%u size=%u",
1527 vsi
->type
, vsi
->offset
, vsi
->size
);
1529 __btf_verifier_log(log
, " ");
1530 va_start(args
, fmt
);
1531 bpf_verifier_vlog(log
, fmt
, args
);
1535 __btf_verifier_log(log
, "\n");
1538 static void btf_verifier_log_hdr(struct btf_verifier_env
*env
,
1541 struct bpf_verifier_log
*log
= &env
->log
;
1542 const struct btf
*btf
= env
->btf
;
1543 const struct btf_header
*hdr
;
1545 if (!bpf_verifier_log_needed(log
))
1548 if (log
->level
== BPF_LOG_KERNEL
)
1551 __btf_verifier_log(log
, "magic: 0x%x\n", hdr
->magic
);
1552 __btf_verifier_log(log
, "version: %u\n", hdr
->version
);
1553 __btf_verifier_log(log
, "flags: 0x%x\n", hdr
->flags
);
1554 __btf_verifier_log(log
, "hdr_len: %u\n", hdr
->hdr_len
);
1555 __btf_verifier_log(log
, "type_off: %u\n", hdr
->type_off
);
1556 __btf_verifier_log(log
, "type_len: %u\n", hdr
->type_len
);
1557 __btf_verifier_log(log
, "str_off: %u\n", hdr
->str_off
);
1558 __btf_verifier_log(log
, "str_len: %u\n", hdr
->str_len
);
1559 __btf_verifier_log(log
, "btf_total_size: %u\n", btf_data_size
);
1562 static int btf_add_type(struct btf_verifier_env
*env
, struct btf_type
*t
)
1564 struct btf
*btf
= env
->btf
;
1566 if (btf
->types_size
== btf
->nr_types
) {
1567 /* Expand 'types' array */
1569 struct btf_type
**new_types
;
1570 u32 expand_by
, new_size
;
1572 if (btf
->start_id
+ btf
->types_size
== BTF_MAX_TYPE
) {
1573 btf_verifier_log(env
, "Exceeded max num of types");
1577 expand_by
= max_t(u32
, btf
->types_size
>> 2, 16);
1578 new_size
= min_t(u32
, BTF_MAX_TYPE
,
1579 btf
->types_size
+ expand_by
);
1581 new_types
= kvcalloc(new_size
, sizeof(*new_types
),
1582 GFP_KERNEL
| __GFP_NOWARN
);
1586 if (btf
->nr_types
== 0) {
1587 if (!btf
->base_btf
) {
1588 /* lazily init VOID type */
1589 new_types
[0] = &btf_void
;
1593 memcpy(new_types
, btf
->types
,
1594 sizeof(*btf
->types
) * btf
->nr_types
);
1598 btf
->types
= new_types
;
1599 btf
->types_size
= new_size
;
1602 btf
->types
[btf
->nr_types
++] = t
;
1607 static int btf_alloc_id(struct btf
*btf
)
1611 idr_preload(GFP_KERNEL
);
1612 spin_lock_bh(&btf_idr_lock
);
1613 id
= idr_alloc_cyclic(&btf_idr
, btf
, 1, INT_MAX
, GFP_ATOMIC
);
1616 spin_unlock_bh(&btf_idr_lock
);
1619 if (WARN_ON_ONCE(!id
))
1622 return id
> 0 ? 0 : id
;
1625 static void btf_free_id(struct btf
*btf
)
1627 unsigned long flags
;
1630 * In map-in-map, calling map_delete_elem() on outer
1631 * map will call bpf_map_put on the inner map.
1632 * It will then eventually call btf_free_id()
1633 * on the inner map. Some of the map_delete_elem()
1634 * implementation may have irq disabled, so
1635 * we need to use the _irqsave() version instead
1636 * of the _bh() version.
1638 spin_lock_irqsave(&btf_idr_lock
, flags
);
1639 idr_remove(&btf_idr
, btf
->id
);
1640 spin_unlock_irqrestore(&btf_idr_lock
, flags
);
1643 static void btf_free_kfunc_set_tab(struct btf
*btf
)
1645 struct btf_kfunc_set_tab
*tab
= btf
->kfunc_set_tab
;
1650 /* For module BTF, we directly assign the sets being registered, so
1651 * there is nothing to free except kfunc_set_tab.
1653 if (btf_is_module(btf
))
1655 for (hook
= 0; hook
< ARRAY_SIZE(tab
->sets
); hook
++)
1656 kfree(tab
->sets
[hook
]);
1659 btf
->kfunc_set_tab
= NULL
;
1662 static void btf_free_dtor_kfunc_tab(struct btf
*btf
)
1664 struct btf_id_dtor_kfunc_tab
*tab
= btf
->dtor_kfunc_tab
;
1669 btf
->dtor_kfunc_tab
= NULL
;
1672 static void btf_struct_metas_free(struct btf_struct_metas
*tab
)
1678 for (i
= 0; i
< tab
->cnt
; i
++)
1679 btf_record_free(tab
->types
[i
].record
);
1683 static void btf_free_struct_meta_tab(struct btf
*btf
)
1685 struct btf_struct_metas
*tab
= btf
->struct_meta_tab
;
1687 btf_struct_metas_free(tab
);
1688 btf
->struct_meta_tab
= NULL
;
1691 static void btf_free(struct btf
*btf
)
1693 btf_free_struct_meta_tab(btf
);
1694 btf_free_dtor_kfunc_tab(btf
);
1695 btf_free_kfunc_set_tab(btf
);
1697 kvfree(btf
->resolved_sizes
);
1698 kvfree(btf
->resolved_ids
);
1703 static void btf_free_rcu(struct rcu_head
*rcu
)
1705 struct btf
*btf
= container_of(rcu
, struct btf
, rcu
);
1710 void btf_get(struct btf
*btf
)
1712 refcount_inc(&btf
->refcnt
);
1715 void btf_put(struct btf
*btf
)
1717 if (btf
&& refcount_dec_and_test(&btf
->refcnt
)) {
1719 call_rcu(&btf
->rcu
, btf_free_rcu
);
1723 static int env_resolve_init(struct btf_verifier_env
*env
)
1725 struct btf
*btf
= env
->btf
;
1726 u32 nr_types
= btf
->nr_types
;
1727 u32
*resolved_sizes
= NULL
;
1728 u32
*resolved_ids
= NULL
;
1729 u8
*visit_states
= NULL
;
1731 resolved_sizes
= kvcalloc(nr_types
, sizeof(*resolved_sizes
),
1732 GFP_KERNEL
| __GFP_NOWARN
);
1733 if (!resolved_sizes
)
1736 resolved_ids
= kvcalloc(nr_types
, sizeof(*resolved_ids
),
1737 GFP_KERNEL
| __GFP_NOWARN
);
1741 visit_states
= kvcalloc(nr_types
, sizeof(*visit_states
),
1742 GFP_KERNEL
| __GFP_NOWARN
);
1746 btf
->resolved_sizes
= resolved_sizes
;
1747 btf
->resolved_ids
= resolved_ids
;
1748 env
->visit_states
= visit_states
;
1753 kvfree(resolved_sizes
);
1754 kvfree(resolved_ids
);
1755 kvfree(visit_states
);
1759 static void btf_verifier_env_free(struct btf_verifier_env
*env
)
1761 kvfree(env
->visit_states
);
1765 static bool env_type_is_resolve_sink(const struct btf_verifier_env
*env
,
1766 const struct btf_type
*next_type
)
1768 switch (env
->resolve_mode
) {
1770 /* int, enum or void is a sink */
1771 return !btf_type_needs_resolve(next_type
);
1773 /* int, enum, void, struct, array, func or func_proto is a sink
1776 return !btf_type_is_modifier(next_type
) &&
1777 !btf_type_is_ptr(next_type
);
1778 case RESOLVE_STRUCT_OR_ARRAY
:
1779 /* int, enum, void, ptr, func or func_proto is a sink
1780 * for struct and array
1782 return !btf_type_is_modifier(next_type
) &&
1783 !btf_type_is_array(next_type
) &&
1784 !btf_type_is_struct(next_type
);
1790 static bool env_type_is_resolved(const struct btf_verifier_env
*env
,
1793 /* base BTF types should be resolved by now */
1794 if (type_id
< env
->btf
->start_id
)
1797 return env
->visit_states
[type_id
- env
->btf
->start_id
] == RESOLVED
;
1800 static int env_stack_push(struct btf_verifier_env
*env
,
1801 const struct btf_type
*t
, u32 type_id
)
1803 const struct btf
*btf
= env
->btf
;
1804 struct resolve_vertex
*v
;
1806 if (env
->top_stack
== MAX_RESOLVE_DEPTH
)
1809 if (type_id
< btf
->start_id
1810 || env
->visit_states
[type_id
- btf
->start_id
] != NOT_VISITED
)
1813 env
->visit_states
[type_id
- btf
->start_id
] = VISITED
;
1815 v
= &env
->stack
[env
->top_stack
++];
1817 v
->type_id
= type_id
;
1820 if (env
->resolve_mode
== RESOLVE_TBD
) {
1821 if (btf_type_is_ptr(t
))
1822 env
->resolve_mode
= RESOLVE_PTR
;
1823 else if (btf_type_is_struct(t
) || btf_type_is_array(t
))
1824 env
->resolve_mode
= RESOLVE_STRUCT_OR_ARRAY
;
1830 static void env_stack_set_next_member(struct btf_verifier_env
*env
,
1833 env
->stack
[env
->top_stack
- 1].next_member
= next_member
;
1836 static void env_stack_pop_resolved(struct btf_verifier_env
*env
,
1837 u32 resolved_type_id
,
1840 u32 type_id
= env
->stack
[--(env
->top_stack
)].type_id
;
1841 struct btf
*btf
= env
->btf
;
1843 type_id
-= btf
->start_id
; /* adjust to local type id */
1844 btf
->resolved_sizes
[type_id
] = resolved_size
;
1845 btf
->resolved_ids
[type_id
] = resolved_type_id
;
1846 env
->visit_states
[type_id
] = RESOLVED
;
1849 static const struct resolve_vertex
*env_stack_peak(struct btf_verifier_env
*env
)
1851 return env
->top_stack
? &env
->stack
[env
->top_stack
- 1] : NULL
;
1854 /* Resolve the size of a passed-in "type"
1856 * type: is an array (e.g. u32 array[x][y])
1857 * return type: type "u32[x][y]", i.e. BTF_KIND_ARRAY,
1858 * *type_size: (x * y * sizeof(u32)). Hence, *type_size always
1859 * corresponds to the return type.
1861 * *elem_id: id of u32
1862 * *total_nelems: (x * y). Hence, individual elem size is
1863 * (*type_size / *total_nelems)
1864 * *type_id: id of type if it's changed within the function, 0 if not
1866 * type: is not an array (e.g. const struct X)
1867 * return type: type "struct X"
1868 * *type_size: sizeof(struct X)
1869 * *elem_type: same as return type ("struct X")
1872 * *type_id: id of type if it's changed within the function, 0 if not
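 *
 * A worked example of the array case (illustrative): for "u32 a[2][3]"
 * this returns the BTF_KIND_ARRAY type with
 * *type_size = 2 * 3 * sizeof(u32) = 24, *total_nelems = 6 and
 * *elem_id = the id of u32.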
1874 static const struct btf_type
*
1875 __btf_resolve_size(const struct btf
*btf
, const struct btf_type
*type
,
1876 u32
*type_size
, const struct btf_type
**elem_type
,
1877 u32
*elem_id
, u32
*total_nelems
, u32
*type_id
)
1879 const struct btf_type
*array_type
= NULL
;
1880 const struct btf_array
*array
= NULL
;
1881 u32 i
, size
, nelems
= 1, id
= 0;
1883 for (i
= 0; i
< MAX_RESOLVE_DEPTH
; i
++) {
1884 switch (BTF_INFO_KIND(type
->info
)) {
1885 /* type->size can be used */
1887 case BTF_KIND_STRUCT
:
1888 case BTF_KIND_UNION
:
1890 case BTF_KIND_FLOAT
:
1891 case BTF_KIND_ENUM64
:
1896 size
= sizeof(void *);
1900 case BTF_KIND_TYPEDEF
:
1901 case BTF_KIND_VOLATILE
:
1902 case BTF_KIND_CONST
:
1903 case BTF_KIND_RESTRICT
:
1904 case BTF_KIND_TYPE_TAG
:
1906 type
= btf_type_by_id(btf
, type
->type
);
1909 case BTF_KIND_ARRAY
:
1912 array
= btf_type_array(type
);
1913 if (nelems
&& array
->nelems
> U32_MAX
/ nelems
)
1914 return ERR_PTR(-EINVAL
);
1915 nelems
*= array
->nelems
;
1916 type
= btf_type_by_id(btf
, array
->type
);
1919 /* type without size */
1921 return ERR_PTR(-EINVAL
);
1925 return ERR_PTR(-EINVAL
);
1928 if (nelems
&& size
> U32_MAX
/ nelems
)
1929 return ERR_PTR(-EINVAL
);
1931 *type_size
= nelems
* size
;
1933 *total_nelems
= nelems
;
1937 *elem_id
= array
? array
->type
: 0;
1941 return array_type
? : type
;
1944 const struct btf_type
*
1945 btf_resolve_size(const struct btf
*btf
, const struct btf_type
*type
,
1948 return __btf_resolve_size(btf
, type
, type_size
, NULL
, NULL
, NULL
, NULL
);
1951 static u32
btf_resolved_type_id(const struct btf
*btf
, u32 type_id
)
1953 while (type_id
< btf
->start_id
)
1954 btf
= btf
->base_btf
;
1956 return btf
->resolved_ids
[type_id
- btf
->start_id
];
1959 /* The input param "type_id" must point to a needs_resolve type */
1960 static const struct btf_type
*btf_type_id_resolve(const struct btf
*btf
,
1963 *type_id
= btf_resolved_type_id(btf
, *type_id
);
1964 return btf_type_by_id(btf
, *type_id
);
1967 static u32
btf_resolved_type_size(const struct btf
*btf
, u32 type_id
)
1969 while (type_id
< btf
->start_id
)
1970 btf
= btf
->base_btf
;
1972 return btf
->resolved_sizes
[type_id
- btf
->start_id
];
1975 const struct btf_type
*btf_type_id_size(const struct btf
*btf
,
1976 u32
*type_id
, u32
*ret_size
)
1978 const struct btf_type
*size_type
;
1979 u32 size_type_id
= *type_id
;
1982 size_type
= btf_type_by_id(btf
, size_type_id
);
1983 if (btf_type_nosize_or_null(size_type
))
1986 if (btf_type_has_size(size_type
)) {
1987 size
= size_type
->size
;
1988 } else if (btf_type_is_array(size_type
)) {
1989 size
= btf_resolved_type_size(btf
, size_type_id
);
1990 } else if (btf_type_is_ptr(size_type
)) {
1991 size
= sizeof(void *);
1993 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type
) &&
1994 !btf_type_is_var(size_type
)))
1997 size_type_id
= btf_resolved_type_id(btf
, size_type_id
);
1998 size_type
= btf_type_by_id(btf
, size_type_id
);
1999 if (btf_type_nosize_or_null(size_type
))
2001 else if (btf_type_has_size(size_type
))
2002 size
= size_type
->size
;
2003 else if (btf_type_is_array(size_type
))
2004 size
= btf_resolved_type_size(btf
, size_type_id
);
2005 else if (btf_type_is_ptr(size_type
))
2006 size
= sizeof(void *);
2011 *type_id
= size_type_id
;
2018 static int btf_df_check_member(struct btf_verifier_env
*env
,
2019 const struct btf_type
*struct_type
,
2020 const struct btf_member
*member
,
2021 const struct btf_type
*member_type
)
2023 btf_verifier_log_basic(env
, struct_type
,
2024 "Unsupported check_member");
2028 static int btf_df_check_kflag_member(struct btf_verifier_env
*env
,
2029 const struct btf_type
*struct_type
,
2030 const struct btf_member
*member
,
2031 const struct btf_type
*member_type
)
2033 btf_verifier_log_basic(env
, struct_type
,
2034 "Unsupported check_kflag_member");
2038 /* Used for ptr, array struct/union and float type members.
2039 * int, enum and modifier types have their specific callback functions.
2041 static int btf_generic_check_kflag_member(struct btf_verifier_env
*env
,
2042 const struct btf_type
*struct_type
,
2043 const struct btf_member
*member
,
2044 const struct btf_type
*member_type
)
2046 if (BTF_MEMBER_BITFIELD_SIZE(member
->offset
)) {
2047 btf_verifier_log_member(env
, struct_type
, member
,
2048 "Invalid member bitfield_size");
2052 /* bitfield size is 0, so member->offset represents bit offset only.
2053 * It is safe to call non kflag check_member variants.
2055 return btf_type_ops(member_type
)->check_member(env
, struct_type
,
2060 static int btf_df_resolve(struct btf_verifier_env
*env
,
2061 const struct resolve_vertex
*v
)
2063 btf_verifier_log_basic(env
, v
->t
, "Unsupported resolve");
2067 static void btf_df_show(const struct btf
*btf
, const struct btf_type
*t
,
2068 u32 type_id
, void *data
, u8 bits_offsets
,
2069 struct btf_show
*show
)
2071 btf_show(show
, "<unsupported kind:%u>", BTF_INFO_KIND(t
->info
));
2074 static int btf_int_check_member(struct btf_verifier_env
*env
,
2075 const struct btf_type
*struct_type
,
2076 const struct btf_member
*member
,
2077 const struct btf_type
*member_type
)
2079 u32 int_data
= btf_type_int(member_type
);
2080 u32 struct_bits_off
= member
->offset
;
2081 u32 struct_size
= struct_type
->size
;
2085 if (U32_MAX
- struct_bits_off
< BTF_INT_OFFSET(int_data
)) {
2086 btf_verifier_log_member(env
, struct_type
, member
,
2087 "bits_offset exceeds U32_MAX");
2091 struct_bits_off
+= BTF_INT_OFFSET(int_data
);
2092 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
2093 nr_copy_bits
= BTF_INT_BITS(int_data
) +
2094 BITS_PER_BYTE_MASKED(struct_bits_off
);
2096 if (nr_copy_bits
> BITS_PER_U128
) {
2097 btf_verifier_log_member(env
, struct_type
, member
,
2098 "nr_copy_bits exceeds 128");
2102 if (struct_size
< bytes_offset
||
2103 struct_size
- bytes_offset
< BITS_ROUNDUP_BYTES(nr_copy_bits
)) {
2104 btf_verifier_log_member(env
, struct_type
, member
,
2105 "Member exceeds struct_size");
2112 static int btf_int_check_kflag_member(struct btf_verifier_env
*env
,
2113 const struct btf_type
*struct_type
,
2114 const struct btf_member
*member
,
2115 const struct btf_type
*member_type
)
2117 u32 struct_bits_off
, nr_bits
, nr_int_data_bits
, bytes_offset
;
2118 u32 int_data
= btf_type_int(member_type
);
2119 u32 struct_size
= struct_type
->size
;
2122 /* a regular int type is required for the kflag int member */
2123 if (!btf_type_int_is_regular(member_type
)) {
2124 btf_verifier_log_member(env
, struct_type
, member
,
2125 "Invalid member base type");
2129 /* check sanity of bitfield size */
2130 nr_bits
= BTF_MEMBER_BITFIELD_SIZE(member
->offset
);
2131 struct_bits_off
= BTF_MEMBER_BIT_OFFSET(member
->offset
);
2132 nr_int_data_bits
= BTF_INT_BITS(int_data
);
2134 /* Not a bitfield member, member offset must be at byte
2137 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
2138 btf_verifier_log_member(env
, struct_type
, member
,
2139 "Invalid member offset");
2143 nr_bits
= nr_int_data_bits
;
2144 } else if (nr_bits
> nr_int_data_bits
) {
2145 btf_verifier_log_member(env
, struct_type
, member
,
2146 "Invalid member bitfield_size");
2150 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
2151 nr_copy_bits
= nr_bits
+ BITS_PER_BYTE_MASKED(struct_bits_off
);
2152 if (nr_copy_bits
> BITS_PER_U128
) {
2153 btf_verifier_log_member(env
, struct_type
, member
,
2154 "nr_copy_bits exceeds 128");
2158 if (struct_size
< bytes_offset
||
2159 struct_size
- bytes_offset
< BITS_ROUNDUP_BYTES(nr_copy_bits
)) {
2160 btf_verifier_log_member(env
, struct_type
, member
,
2161 "Member exceeds struct_size");
2168 static s32
btf_int_check_meta(struct btf_verifier_env
*env
,
2169 const struct btf_type
*t
,
2172 u32 int_data
, nr_bits
, meta_needed
= sizeof(int_data
);
2175 if (meta_left
< meta_needed
) {
2176 btf_verifier_log_basic(env
, t
,
2177 "meta_left:%u meta_needed:%u",
2178 meta_left
, meta_needed
);
2182 if (btf_type_vlen(t
)) {
2183 btf_verifier_log_type(env
, t
, "vlen != 0");
2187 if (btf_type_kflag(t
)) {
2188 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2192 int_data
= btf_type_int(t
);
2193 if (int_data
& ~BTF_INT_MASK
) {
2194 btf_verifier_log_basic(env
, t
, "Invalid int_data:%x",
2199 nr_bits
= BTF_INT_BITS(int_data
) + BTF_INT_OFFSET(int_data
);
2201 if (nr_bits
> BITS_PER_U128
) {
2202 btf_verifier_log_type(env
, t
, "nr_bits exceeds %zu",
2207 if (BITS_ROUNDUP_BYTES(nr_bits
) > t
->size
) {
2208 btf_verifier_log_type(env
, t
, "nr_bits exceeds type_size");
2213 * Only one of the encoding bits is allowed and it
2214 * should be sufficient for the pretty print purpose (i.e. decoding).
2215 * Multiple bits can be allowed later if it is found
2216 * to be insufficient.
2218 encoding
= BTF_INT_ENCODING(int_data
);
2220 encoding
!= BTF_INT_SIGNED
&&
2221 encoding
!= BTF_INT_CHAR
&&
2222 encoding
!= BTF_INT_BOOL
) {
2223 btf_verifier_log_type(env
, t
, "Unsupported encoding");
2227 btf_verifier_log_type(env
, t
, NULL
);
static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{
	int int_data = btf_type_int(t);

	btf_verifier_log(env,
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
			 t->size, BTF_INT_OFFSET(int_data),
			 BTF_INT_BITS(int_data),
			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
static void btf_int128_print(struct btf_show *show, void *data)
{
	/* data points to a __int128 number.
	 * Suppose
	 *     int128_num = *(__int128 *)data;
	 * The below formulas shows what upper_num and lower_num represents:
	 *     upper_num = int128_num >> 64;
	 *     lower_num = int128_num & 0xffffffffFFFFFFFFULL;
	 */
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = *(u64 *)data;
	lower_num = *(u64 *)(data + 8);
#else
	upper_num = *(u64 *)(data + 8);
	lower_num = *(u64 *)data;
#endif
	if (upper_num == 0)
		btf_show_type_value(show, "0x%llx", lower_num);
	else
		btf_show_type_values(show, "0x%llx%016llx", upper_num,
				     lower_num);
}
static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
			     u16 right_shift_bits)
{
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = print_num[0];
	lower_num = print_num[1];
#else
	upper_num = print_num[1];
	lower_num = print_num[0];
#endif

	/* shake out un-needed bits by shift/or operations */
	if (left_shift_bits >= 64) {
		upper_num = lower_num << (left_shift_bits - 64);
		lower_num = 0;
	} else {
		upper_num = (upper_num << left_shift_bits) |
			    (lower_num >> (64 - left_shift_bits));
		lower_num = lower_num << left_shift_bits;
	}

	if (right_shift_bits >= 64) {
		lower_num = upper_num >> (right_shift_bits - 64);
		upper_num = 0;
	} else {
		lower_num = (lower_num >> right_shift_bits) |
			    (upper_num << (64 - right_shift_bits));
		upper_num = upper_num >> right_shift_bits;
	}

#ifdef __BIG_ENDIAN_BITFIELD
	print_num[0] = upper_num;
	print_num[1] = lower_num;
#else
	print_num[0] = lower_num;
	print_num[1] = upper_num;
#endif
}
static void btf_bitfield_show(void *data, u8 bits_offset,
			      u8 nr_bits, struct btf_show *show)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(show, print_num);
}
static void btf_int_bits_show(const struct btf *btf,
			      const struct btf_type *t,
			      void *data, u8 bits_offset,
			      struct btf_show *show)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.
	 * BTF_INT_OFFSET() cannot exceed 128 bits.
	 */
	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_show(data, bits_offset, nr_bits, show);
}
static void btf_int_show(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offset,
			 struct btf_show *show)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);
	void *safe_data;

	safe_data = btf_show_start_type(show, t, type_id, data);
	if (!safe_data)
		return;

	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
		goto out;
	}

	switch (nr_bits) {
	case 128:
		btf_int128_print(show, safe_data);
		break;
	case 64:
		if (sign)
			btf_show_type_value(show, "%lld", *(s64 *)safe_data);
		else
			btf_show_type_value(show, "%llu", *(u64 *)safe_data);
		break;
	case 32:
		if (sign)
			btf_show_type_value(show, "%d", *(s32 *)safe_data);
		else
			btf_show_type_value(show, "%u", *(u32 *)safe_data);
		break;
	case 16:
		if (sign)
			btf_show_type_value(show, "%d", *(s16 *)safe_data);
		else
			btf_show_type_value(show, "%u", *(u16 *)safe_data);
		break;
	case 8:
		if (show->state.array_encoding == BTF_INT_CHAR) {
			/* check for null terminator */
			if (show->state.array_terminated)
				break;
			if (*(char *)data == '\0') {
				show->state.array_terminated = 1;
				break;
			}
			if (isprint(*(char *)data)) {
				btf_show_type_value(show, "'%c'",
						    *(char *)safe_data);
				break;
			}
		}
		if (sign)
			btf_show_type_value(show, "%d", *(s8 *)safe_data);
		else
			btf_show_type_value(show, "%u", *(u8 *)safe_data);
		break;
	default:
		btf_int_bits_show(btf, t, safe_data, bits_offset, show);
		break;
	}
out:
	btf_show_end_type(show);
}
static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.check_kflag_member = btf_int_check_kflag_member,
	.log_details = btf_int_log,
	.show = btf_int_show,
};
static int btf_modifier_check_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_member(env, struct_type,
							 &resolved_member,
							 resolved_type);
}
static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
					   const struct btf_type *struct_type,
					   const struct btf_member *member,
					   const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
							       &resolved_member,
							       resolved_type);
}
static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 struct_size, struct_bits_off, bytes_offset;

	struct_size = struct_type->size;
	struct_bits_off = member->offset;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (struct_size - bytes_offset < sizeof(void *)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
2509 static int btf_ref_type_check_meta(struct btf_verifier_env
*env
,
2510 const struct btf_type
*t
,
2515 if (btf_type_vlen(t
)) {
2516 btf_verifier_log_type(env
, t
, "vlen != 0");
2520 if (btf_type_kflag(t
)) {
2521 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2525 if (!BTF_TYPE_ID_VALID(t
->type
)) {
2526 btf_verifier_log_type(env
, t
, "Invalid type_id");
2530 /* typedef/type_tag type must have a valid name, and other ref types,
2531 * volatile, const, restrict, should have a null name.
2533 if (BTF_INFO_KIND(t
->info
) == BTF_KIND_TYPEDEF
) {
2535 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
2536 btf_verifier_log_type(env
, t
, "Invalid name");
2539 } else if (BTF_INFO_KIND(t
->info
) == BTF_KIND_TYPE_TAG
) {
2540 value
= btf_name_by_offset(env
->btf
, t
->name_off
);
2541 if (!value
|| !value
[0]) {
2542 btf_verifier_log_type(env
, t
, "Invalid name");
2547 btf_verifier_log_type(env
, t
, "Invalid name");
2552 btf_verifier_log_type(env
, t
, NULL
);
2557 static int btf_modifier_resolve(struct btf_verifier_env
*env
,
2558 const struct resolve_vertex
*v
)
2560 const struct btf_type
*t
= v
->t
;
2561 const struct btf_type
*next_type
;
2562 u32 next_type_id
= t
->type
;
2563 struct btf
*btf
= env
->btf
;
2565 next_type
= btf_type_by_id(btf
, next_type_id
);
2566 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
2567 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
2571 if (!env_type_is_resolve_sink(env
, next_type
) &&
2572 !env_type_is_resolved(env
, next_type_id
))
2573 return env_stack_push(env
, next_type
, next_type_id
);
2575 /* Figure out the resolved next_type_id with size.
2576 * They will be stored in the current modifier's
2577 * resolved_ids and resolved_sizes such that it can
2578 * save us a few type-following when we use it later (e.g. in
2581 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
2582 if (env_type_is_resolved(env
, next_type_id
))
2583 next_type
= btf_type_id_resolve(btf
, &next_type_id
);
2585 /* "typedef void new_void", "const void"...etc */
2586 if (!btf_type_is_void(next_type
) &&
2587 !btf_type_is_fwd(next_type
) &&
2588 !btf_type_is_func_proto(next_type
)) {
2589 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
2594 env_stack_pop_resolved(env
, next_type_id
, 0);
2599 static int btf_var_resolve(struct btf_verifier_env
*env
,
2600 const struct resolve_vertex
*v
)
2602 const struct btf_type
*next_type
;
2603 const struct btf_type
*t
= v
->t
;
2604 u32 next_type_id
= t
->type
;
2605 struct btf
*btf
= env
->btf
;
2607 next_type
= btf_type_by_id(btf
, next_type_id
);
2608 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
2609 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
2613 if (!env_type_is_resolve_sink(env
, next_type
) &&
2614 !env_type_is_resolved(env
, next_type_id
))
2615 return env_stack_push(env
, next_type
, next_type_id
);
2617 if (btf_type_is_modifier(next_type
)) {
2618 const struct btf_type
*resolved_type
;
2619 u32 resolved_type_id
;
2621 resolved_type_id
= next_type_id
;
2622 resolved_type
= btf_type_id_resolve(btf
, &resolved_type_id
);
2624 if (btf_type_is_ptr(resolved_type
) &&
2625 !env_type_is_resolve_sink(env
, resolved_type
) &&
2626 !env_type_is_resolved(env
, resolved_type_id
))
2627 return env_stack_push(env
, resolved_type
,
2631 /* We must resolve to something concrete at this point, no
2632 * forward types or similar that would resolve to size of
2635 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
2636 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
2640 env_stack_pop_resolved(env
, next_type_id
, 0);
2645 static int btf_ptr_resolve(struct btf_verifier_env
*env
,
2646 const struct resolve_vertex
*v
)
2648 const struct btf_type
*next_type
;
2649 const struct btf_type
*t
= v
->t
;
2650 u32 next_type_id
= t
->type
;
2651 struct btf
*btf
= env
->btf
;
2653 next_type
= btf_type_by_id(btf
, next_type_id
);
2654 if (!next_type
|| btf_type_is_resolve_source_only(next_type
)) {
2655 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
2659 if (!env_type_is_resolve_sink(env
, next_type
) &&
2660 !env_type_is_resolved(env
, next_type_id
))
2661 return env_stack_push(env
, next_type
, next_type_id
);
2663 /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
2664 * the modifier may have stopped resolving when it was resolved
2665 * to a ptr (last-resolved-ptr).
2667 * We now need to continue from the last-resolved-ptr to
2668 * ensure the last-resolved-ptr will not referring back to
2669 * the current ptr (t).
2671 if (btf_type_is_modifier(next_type
)) {
2672 const struct btf_type
*resolved_type
;
2673 u32 resolved_type_id
;
2675 resolved_type_id
= next_type_id
;
2676 resolved_type
= btf_type_id_resolve(btf
, &resolved_type_id
);
2678 if (btf_type_is_ptr(resolved_type
) &&
2679 !env_type_is_resolve_sink(env
, resolved_type
) &&
2680 !env_type_is_resolved(env
, resolved_type_id
))
2681 return env_stack_push(env
, resolved_type
,
2685 if (!btf_type_id_size(btf
, &next_type_id
, NULL
)) {
2686 if (env_type_is_resolved(env
, next_type_id
))
2687 next_type
= btf_type_id_resolve(btf
, &next_type_id
);
2689 if (!btf_type_is_void(next_type
) &&
2690 !btf_type_is_fwd(next_type
) &&
2691 !btf_type_is_func_proto(next_type
)) {
2692 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
2697 env_stack_pop_resolved(env
, next_type_id
, 0);
static void btf_modifier_show(const struct btf *btf,
			      const struct btf_type *t,
			      u32 type_id, void *data,
			      u8 bits_offset, struct btf_show *show)
{
	if (btf->resolved_ids)
		t = btf_type_id_resolve(btf, &type_id);
	else
		t = btf_type_skip_modifiers(btf, type_id, NULL);

	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
}
static void btf_var_show(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offset,
			 struct btf_show *show)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
}
static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offset,
			 struct btf_show *show)
{
	void *safe_data;

	safe_data = btf_show_start_type(show, t, type_id, data);
	if (!safe_data)
		return;

	/* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
	if (show->flags & BTF_SHOW_PTR_RAW)
		btf_show_type_value(show, "0x%px", *(void **)safe_data);
	else
		btf_show_type_value(show, "0x%p", *(void **)safe_data);
	btf_show_end_type(show);
}
static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "type_id=%u", t->type);
}

static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.check_kflag_member = btf_modifier_check_kflag_member,
	.log_details = btf_ref_type_log,
	.show = btf_modifier_show,
};

static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_ref_type_log,
	.show = btf_ptr_show,
};
2766 static s32
btf_fwd_check_meta(struct btf_verifier_env
*env
,
2767 const struct btf_type
*t
,
2770 if (btf_type_vlen(t
)) {
2771 btf_verifier_log_type(env
, t
, "vlen != 0");
2776 btf_verifier_log_type(env
, t
, "type != 0");
2780 /* fwd type must have a valid name */
2782 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
2783 btf_verifier_log_type(env
, t
, "Invalid name");
2787 btf_verifier_log_type(env
, t
, NULL
);
2792 static void btf_fwd_type_log(struct btf_verifier_env
*env
,
2793 const struct btf_type
*t
)
2795 btf_verifier_log(env
, "%s", btf_type_kflag(t
) ? "union" : "struct");
2798 static struct btf_kind_operations fwd_ops
= {
2799 .check_meta
= btf_fwd_check_meta
,
2800 .resolve
= btf_df_resolve
,
2801 .check_member
= btf_df_check_member
,
2802 .check_kflag_member
= btf_df_check_kflag_member
,
2803 .log_details
= btf_fwd_type_log
,
2804 .show
= btf_df_show
,
2807 static int btf_array_check_member(struct btf_verifier_env
*env
,
2808 const struct btf_type
*struct_type
,
2809 const struct btf_member
*member
,
2810 const struct btf_type
*member_type
)
2812 u32 struct_bits_off
= member
->offset
;
2813 u32 struct_size
, bytes_offset
;
2814 u32 array_type_id
, array_size
;
2815 struct btf
*btf
= env
->btf
;
2817 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
2818 btf_verifier_log_member(env
, struct_type
, member
,
2819 "Member is not byte aligned");
2823 array_type_id
= member
->type
;
2824 btf_type_id_size(btf
, &array_type_id
, &array_size
);
2825 struct_size
= struct_type
->size
;
2826 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
2827 if (struct_size
- bytes_offset
< array_size
) {
2828 btf_verifier_log_member(env
, struct_type
, member
,
2829 "Member exceeds struct_size");
2836 static s32
btf_array_check_meta(struct btf_verifier_env
*env
,
2837 const struct btf_type
*t
,
2840 const struct btf_array
*array
= btf_type_array(t
);
2841 u32 meta_needed
= sizeof(*array
);
2843 if (meta_left
< meta_needed
) {
2844 btf_verifier_log_basic(env
, t
,
2845 "meta_left:%u meta_needed:%u",
2846 meta_left
, meta_needed
);
2850 /* array type should not have a name */
2852 btf_verifier_log_type(env
, t
, "Invalid name");
2856 if (btf_type_vlen(t
)) {
2857 btf_verifier_log_type(env
, t
, "vlen != 0");
2861 if (btf_type_kflag(t
)) {
2862 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
2867 btf_verifier_log_type(env
, t
, "size != 0");
2871 /* Array elem type and index type cannot be in type void,
2872 * so !array->type and !array->index_type are not allowed.
2874 if (!array
->type
|| !BTF_TYPE_ID_VALID(array
->type
)) {
2875 btf_verifier_log_type(env
, t
, "Invalid elem");
2879 if (!array
->index_type
|| !BTF_TYPE_ID_VALID(array
->index_type
)) {
2880 btf_verifier_log_type(env
, t
, "Invalid index");
2884 btf_verifier_log_type(env
, t
, NULL
);
2889 static int btf_array_resolve(struct btf_verifier_env
*env
,
2890 const struct resolve_vertex
*v
)
2892 const struct btf_array
*array
= btf_type_array(v
->t
);
2893 const struct btf_type
*elem_type
, *index_type
;
2894 u32 elem_type_id
, index_type_id
;
2895 struct btf
*btf
= env
->btf
;
2898 /* Check array->index_type */
2899 index_type_id
= array
->index_type
;
2900 index_type
= btf_type_by_id(btf
, index_type_id
);
2901 if (btf_type_nosize_or_null(index_type
) ||
2902 btf_type_is_resolve_source_only(index_type
)) {
2903 btf_verifier_log_type(env
, v
->t
, "Invalid index");
2907 if (!env_type_is_resolve_sink(env
, index_type
) &&
2908 !env_type_is_resolved(env
, index_type_id
))
2909 return env_stack_push(env
, index_type
, index_type_id
);
2911 index_type
= btf_type_id_size(btf
, &index_type_id
, NULL
);
2912 if (!index_type
|| !btf_type_is_int(index_type
) ||
2913 !btf_type_int_is_regular(index_type
)) {
2914 btf_verifier_log_type(env
, v
->t
, "Invalid index");
2918 /* Check array->type */
2919 elem_type_id
= array
->type
;
2920 elem_type
= btf_type_by_id(btf
, elem_type_id
);
2921 if (btf_type_nosize_or_null(elem_type
) ||
2922 btf_type_is_resolve_source_only(elem_type
)) {
2923 btf_verifier_log_type(env
, v
->t
,
2928 if (!env_type_is_resolve_sink(env
, elem_type
) &&
2929 !env_type_is_resolved(env
, elem_type_id
))
2930 return env_stack_push(env
, elem_type
, elem_type_id
);
2932 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
2934 btf_verifier_log_type(env
, v
->t
, "Invalid elem");
2938 if (btf_type_is_int(elem_type
) && !btf_type_int_is_regular(elem_type
)) {
2939 btf_verifier_log_type(env
, v
->t
, "Invalid array of int");
2943 if (array
->nelems
&& elem_size
> U32_MAX
/ array
->nelems
) {
2944 btf_verifier_log_type(env
, v
->t
,
2945 "Array size overflows U32_MAX");
2949 env_stack_pop_resolved(env
, elem_type_id
, elem_size
* array
->nelems
);
static void btf_array_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_array *array = btf_type_array(t);

	btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
			 array->type, array->index_type, array->nelems);
}
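/* In the verifier log this contributes the kind-specific tail of a line such
 * as
 *     [5] ARRAY (anon) type_id=3 index_type_id=2 nr_elems=16
 * (illustrative type ids), matching the "[?] KIND name ..." format described
 * at the top of this file.
 */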
2963 static void __btf_array_show(const struct btf
*btf
, const struct btf_type
*t
,
2964 u32 type_id
, void *data
, u8 bits_offset
,
2965 struct btf_show
*show
)
2967 const struct btf_array
*array
= btf_type_array(t
);
2968 const struct btf_kind_operations
*elem_ops
;
2969 const struct btf_type
*elem_type
;
2970 u32 i
, elem_size
= 0, elem_type_id
;
2973 elem_type_id
= array
->type
;
2974 elem_type
= btf_type_skip_modifiers(btf
, elem_type_id
, NULL
);
2975 if (elem_type
&& btf_type_has_size(elem_type
))
2976 elem_size
= elem_type
->size
;
2978 if (elem_type
&& btf_type_is_int(elem_type
)) {
2979 u32 int_type
= btf_type_int(elem_type
);
2981 encoding
= BTF_INT_ENCODING(int_type
);
2984 * BTF_INT_CHAR encoding never seems to be set for
2985 * char arrays, so if size is 1 and element is
2986 * printable as a char, we'll do that.
2989 encoding
= BTF_INT_CHAR
;
2992 if (!btf_show_start_array_type(show
, t
, type_id
, encoding
, data
))
2997 elem_ops
= btf_type_ops(elem_type
);
2999 for (i
= 0; i
< array
->nelems
; i
++) {
3001 btf_show_start_array_member(show
);
3003 elem_ops
->show(btf
, elem_type
, elem_type_id
, data
,
3007 btf_show_end_array_member(show
);
3009 if (show
->state
.array_terminated
)
3013 btf_show_end_array_type(show
);
3016 static void btf_array_show(const struct btf
*btf
, const struct btf_type
*t
,
3017 u32 type_id
, void *data
, u8 bits_offset
,
3018 struct btf_show
*show
)
3020 const struct btf_member
*m
= show
->state
.member
;
3023 * First check if any members would be shown (are non-zero).
3024 * See comments above "struct btf_show" definition for more
3025 * details on how this works at a high-level.
3027 if (show
->state
.depth
> 0 && !(show
->flags
& BTF_SHOW_ZERO
)) {
3028 if (!show
->state
.depth_check
) {
3029 show
->state
.depth_check
= show
->state
.depth
+ 1;
3030 show
->state
.depth_to_show
= 0;
3032 __btf_array_show(btf
, t
, type_id
, data
, bits_offset
, show
);
3033 show
->state
.member
= m
;
3035 if (show
->state
.depth_check
!= show
->state
.depth
+ 1)
3037 show
->state
.depth_check
= 0;
3039 if (show
->state
.depth_to_show
<= show
->state
.depth
)
3042 * Reaching here indicates we have recursed and found
3043 * non-zero array member(s).
3046 __btf_array_show(btf
, t
, type_id
, data
, bits_offset
, show
);
static struct btf_kind_operations array_ops = {
	.check_meta = btf_array_check_meta,
	.resolve = btf_array_resolve,
	.check_member = btf_array_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_array_log,
	.show = btf_array_show,
};
3058 static int btf_struct_check_member(struct btf_verifier_env
*env
,
3059 const struct btf_type
*struct_type
,
3060 const struct btf_member
*member
,
3061 const struct btf_type
*member_type
)
3063 u32 struct_bits_off
= member
->offset
;
3064 u32 struct_size
, bytes_offset
;
3066 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
3067 btf_verifier_log_member(env
, struct_type
, member
,
3068 "Member is not byte aligned");
3072 struct_size
= struct_type
->size
;
3073 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
3074 if (struct_size
- bytes_offset
< member_type
->size
) {
3075 btf_verifier_log_member(env
, struct_type
, member
,
3076 "Member exceeds struct_size");
3083 static s32
btf_struct_check_meta(struct btf_verifier_env
*env
,
3084 const struct btf_type
*t
,
3087 bool is_union
= BTF_INFO_KIND(t
->info
) == BTF_KIND_UNION
;
3088 const struct btf_member
*member
;
3089 u32 meta_needed
, last_offset
;
3090 struct btf
*btf
= env
->btf
;
3091 u32 struct_size
= t
->size
;
3095 meta_needed
= btf_type_vlen(t
) * sizeof(*member
);
3096 if (meta_left
< meta_needed
) {
3097 btf_verifier_log_basic(env
, t
,
3098 "meta_left:%u meta_needed:%u",
3099 meta_left
, meta_needed
);
3103 /* struct type either no name or a valid one */
3105 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
3106 btf_verifier_log_type(env
, t
, "Invalid name");
3110 btf_verifier_log_type(env
, t
, NULL
);
3113 for_each_member(i
, t
, member
) {
3114 if (!btf_name_offset_valid(btf
, member
->name_off
)) {
3115 btf_verifier_log_member(env
, t
, member
,
3116 "Invalid member name_offset:%u",
3121 /* struct member either no name or a valid one */
3122 if (member
->name_off
&&
3123 !btf_name_valid_identifier(btf
, member
->name_off
)) {
3124 btf_verifier_log_member(env
, t
, member
, "Invalid name");
3127 /* A member cannot be in type void */
3128 if (!member
->type
|| !BTF_TYPE_ID_VALID(member
->type
)) {
3129 btf_verifier_log_member(env
, t
, member
,
3134 offset
= __btf_member_bit_offset(t
, member
);
3135 if (is_union
&& offset
) {
3136 btf_verifier_log_member(env
, t
, member
,
3137 "Invalid member bits_offset");
3142 * ">" instead of ">=" because the last member could be
3145 if (last_offset
> offset
) {
3146 btf_verifier_log_member(env
, t
, member
,
3147 "Invalid member bits_offset");
3151 if (BITS_ROUNDUP_BYTES(offset
) > struct_size
) {
3152 btf_verifier_log_member(env
, t
, member
,
3153 "Member bits_offset exceeds its struct size");
3157 btf_verifier_log_member(env
, t
, member
, NULL
);
3158 last_offset
= offset
;
3164 static int btf_struct_resolve(struct btf_verifier_env
*env
,
3165 const struct resolve_vertex
*v
)
3167 const struct btf_member
*member
;
3171 /* Before continue resolving the next_member,
3172 * ensure the last member is indeed resolved to a
3173 * type with size info.
3175 if (v
->next_member
) {
3176 const struct btf_type
*last_member_type
;
3177 const struct btf_member
*last_member
;
3178 u32 last_member_type_id
;
3180 last_member
= btf_type_member(v
->t
) + v
->next_member
- 1;
3181 last_member_type_id
= last_member
->type
;
3182 if (WARN_ON_ONCE(!env_type_is_resolved(env
,
3183 last_member_type_id
)))
3186 last_member_type
= btf_type_by_id(env
->btf
,
3187 last_member_type_id
);
3188 if (btf_type_kflag(v
->t
))
3189 err
= btf_type_ops(last_member_type
)->check_kflag_member(env
, v
->t
,
3193 err
= btf_type_ops(last_member_type
)->check_member(env
, v
->t
,
3200 for_each_member_from(i
, v
->next_member
, v
->t
, member
) {
3201 u32 member_type_id
= member
->type
;
3202 const struct btf_type
*member_type
= btf_type_by_id(env
->btf
,
3205 if (btf_type_nosize_or_null(member_type
) ||
3206 btf_type_is_resolve_source_only(member_type
)) {
3207 btf_verifier_log_member(env
, v
->t
, member
,
3212 if (!env_type_is_resolve_sink(env
, member_type
) &&
3213 !env_type_is_resolved(env
, member_type_id
)) {
3214 env_stack_set_next_member(env
, i
+ 1);
3215 return env_stack_push(env
, member_type
, member_type_id
);
3218 if (btf_type_kflag(v
->t
))
3219 err
= btf_type_ops(member_type
)->check_kflag_member(env
, v
->t
,
3223 err
= btf_type_ops(member_type
)->check_member(env
, v
->t
,
3230 env_stack_pop_resolved(env
, 0, 0);
3235 static void btf_struct_log(struct btf_verifier_env
*env
,
3236 const struct btf_type
*t
)
3238 btf_verifier_log(env
, "size=%u vlen=%u", t
->size
, btf_type_vlen(t
));
3242 BTF_FIELD_IGNORE
= 0,
3243 BTF_FIELD_FOUND
= 1,
3246 struct btf_field_info
{
3247 enum btf_field_type type
;
3254 const char *node_name
;
3260 static int btf_find_struct(const struct btf
*btf
, const struct btf_type
*t
,
3261 u32 off
, int sz
, enum btf_field_type field_type
,
3262 struct btf_field_info
*info
)
3264 if (!__btf_type_is_struct(t
))
3265 return BTF_FIELD_IGNORE
;
3267 return BTF_FIELD_IGNORE
;
3268 info
->type
= field_type
;
3270 return BTF_FIELD_FOUND
;
3273 static int btf_find_kptr(const struct btf
*btf
, const struct btf_type
*t
,
3274 u32 off
, int sz
, struct btf_field_info
*info
)
3276 enum btf_field_type type
;
3279 /* Permit modifiers on the pointer itself */
3280 if (btf_type_is_volatile(t
))
3281 t
= btf_type_by_id(btf
, t
->type
);
3282 /* For PTR, sz is always == 8 */
3283 if (!btf_type_is_ptr(t
))
3284 return BTF_FIELD_IGNORE
;
3285 t
= btf_type_by_id(btf
, t
->type
);
3287 if (!btf_type_is_type_tag(t
))
3288 return BTF_FIELD_IGNORE
;
3289 /* Reject extra tags */
3290 if (btf_type_is_type_tag(btf_type_by_id(btf
, t
->type
)))
3292 if (!strcmp("kptr_untrusted", __btf_name_by_offset(btf
, t
->name_off
)))
3293 type
= BPF_KPTR_UNREF
;
3294 else if (!strcmp("kptr", __btf_name_by_offset(btf
, t
->name_off
)))
3295 type
= BPF_KPTR_REF
;
3299 /* Get the base type */
3300 t
= btf_type_skip_modifiers(btf
, t
->type
, &res_id
);
3301 /* Only pointer to struct is allowed */
3302 if (!__btf_type_is_struct(t
))
3307 info
->kptr
.type_id
= res_id
;
3308 return BTF_FIELD_FOUND
;
3311 static const char *btf_find_decl_tag_value(const struct btf
*btf
,
3312 const struct btf_type
*pt
,
3313 int comp_idx
, const char *tag_key
)
3317 for (i
= 1; i
< btf_nr_types(btf
); i
++) {
3318 const struct btf_type
*t
= btf_type_by_id(btf
, i
);
3319 int len
= strlen(tag_key
);
3321 if (!btf_type_is_decl_tag(t
))
3323 if (pt
!= btf_type_by_id(btf
, t
->type
) ||
3324 btf_type_decl_tag(t
)->component_idx
!= comp_idx
)
3326 if (strncmp(__btf_name_by_offset(btf
, t
->name_off
), tag_key
, len
))
3328 return __btf_name_by_offset(btf
, t
->name_off
) + len
;
3334 btf_find_graph_root(const struct btf
*btf
, const struct btf_type
*pt
,
3335 const struct btf_type
*t
, int comp_idx
, u32 off
,
3336 int sz
, struct btf_field_info
*info
,
3337 enum btf_field_type head_type
)
3339 const char *node_field_name
;
3340 const char *value_type
;
3343 if (!__btf_type_is_struct(t
))
3344 return BTF_FIELD_IGNORE
;
3346 return BTF_FIELD_IGNORE
;
3347 value_type
= btf_find_decl_tag_value(btf
, pt
, comp_idx
, "contains:");
3350 node_field_name
= strstr(value_type
, ":");
3351 if (!node_field_name
)
3353 value_type
= kstrndup(value_type
, node_field_name
- value_type
, GFP_KERNEL
| __GFP_NOWARN
);
3356 id
= btf_find_by_name_kind(btf
, value_type
, BTF_KIND_STRUCT
);
3361 if (str_is_empty(node_field_name
))
3363 info
->type
= head_type
;
3365 info
->graph_root
.value_btf_id
= id
;
3366 info
->graph_root
.node_name
= node_field_name
;
3367 return BTF_FIELD_FOUND
;
3370 #define field_mask_test_name(field_type, field_type_str) \
3371 if (field_mask & field_type && !strcmp(name, field_type_str)) { \
3372 type = field_type; \
3376 static int btf_get_field_type(const char *name
, u32 field_mask
, u32
*seen_mask
,
3377 int *align
, int *sz
)
3381 if (field_mask
& BPF_SPIN_LOCK
) {
3382 if (!strcmp(name
, "bpf_spin_lock")) {
3383 if (*seen_mask
& BPF_SPIN_LOCK
)
3385 *seen_mask
|= BPF_SPIN_LOCK
;
3386 type
= BPF_SPIN_LOCK
;
3390 if (field_mask
& BPF_TIMER
) {
3391 if (!strcmp(name
, "bpf_timer")) {
3392 if (*seen_mask
& BPF_TIMER
)
3394 *seen_mask
|= BPF_TIMER
;
3399 field_mask_test_name(BPF_LIST_HEAD
, "bpf_list_head");
3400 field_mask_test_name(BPF_LIST_NODE
, "bpf_list_node");
3401 field_mask_test_name(BPF_RB_ROOT
, "bpf_rb_root");
3402 field_mask_test_name(BPF_RB_NODE
, "bpf_rb_node");
3403 field_mask_test_name(BPF_REFCOUNT
, "bpf_refcount");
3405 /* Only return BPF_KPTR when all other types with matchable names fail */
3406 if (field_mask
& BPF_KPTR
) {
3407 type
= BPF_KPTR_REF
;
3412 *sz
= btf_field_type_size(type
);
3413 *align
= btf_field_type_align(type
);
3417 #undef field_mask_test_name
3419 static int btf_find_struct_field(const struct btf
*btf
,
3420 const struct btf_type
*t
, u32 field_mask
,
3421 struct btf_field_info
*info
, int info_cnt
)
3423 int ret
, idx
= 0, align
, sz
, field_type
;
3424 const struct btf_member
*member
;
3425 struct btf_field_info tmp
;
3426 u32 i
, off
, seen_mask
= 0;
3428 for_each_member(i
, t
, member
) {
3429 const struct btf_type
*member_type
= btf_type_by_id(btf
,
3432 field_type
= btf_get_field_type(__btf_name_by_offset(btf
, member_type
->name_off
),
3433 field_mask
, &seen_mask
, &align
, &sz
);
3434 if (field_type
== 0)
3439 off
= __btf_member_bit_offset(t
, member
);
3441 /* valid C code cannot generate such BTF */
3447 switch (field_type
) {
3453 ret
= btf_find_struct(btf
, member_type
, off
, sz
, field_type
,
3454 idx
< info_cnt
? &info
[idx
] : &tmp
);
3458 case BPF_KPTR_UNREF
:
3460 ret
= btf_find_kptr(btf
, member_type
, off
, sz
,
3461 idx
< info_cnt
? &info
[idx
] : &tmp
);
3467 ret
= btf_find_graph_root(btf
, t
, member_type
,
3469 idx
< info_cnt
? &info
[idx
] : &tmp
,
3478 if (ret
== BTF_FIELD_IGNORE
)
3480 if (idx
>= info_cnt
)
3487 static int btf_find_datasec_var(const struct btf
*btf
, const struct btf_type
*t
,
3488 u32 field_mask
, struct btf_field_info
*info
,
3491 int ret
, idx
= 0, align
, sz
, field_type
;
3492 const struct btf_var_secinfo
*vsi
;
3493 struct btf_field_info tmp
;
3494 u32 i
, off
, seen_mask
= 0;
3496 for_each_vsi(i
, t
, vsi
) {
3497 const struct btf_type
*var
= btf_type_by_id(btf
, vsi
->type
);
3498 const struct btf_type
*var_type
= btf_type_by_id(btf
, var
->type
);
3500 field_type
= btf_get_field_type(__btf_name_by_offset(btf
, var_type
->name_off
),
3501 field_mask
, &seen_mask
, &align
, &sz
);
3502 if (field_type
== 0)
3508 if (vsi
->size
!= sz
)
3513 switch (field_type
) {
3519 ret
= btf_find_struct(btf
, var_type
, off
, sz
, field_type
,
3520 idx
< info_cnt
? &info
[idx
] : &tmp
);
3524 case BPF_KPTR_UNREF
:
3526 ret
= btf_find_kptr(btf
, var_type
, off
, sz
,
3527 idx
< info_cnt
? &info
[idx
] : &tmp
);
3533 ret
= btf_find_graph_root(btf
, var
, var_type
,
3535 idx
< info_cnt
? &info
[idx
] : &tmp
,
3544 if (ret
== BTF_FIELD_IGNORE
)
3546 if (idx
>= info_cnt
)
3553 static int btf_find_field(const struct btf
*btf
, const struct btf_type
*t
,
3554 u32 field_mask
, struct btf_field_info
*info
,
3557 if (__btf_type_is_struct(t
))
3558 return btf_find_struct_field(btf
, t
, field_mask
, info
, info_cnt
);
3559 else if (btf_type_is_datasec(t
))
3560 return btf_find_datasec_var(btf
, t
, field_mask
, info
, info_cnt
);
3564 static int btf_parse_kptr(const struct btf
*btf
, struct btf_field
*field
,
3565 struct btf_field_info
*info
)
3567 struct module
*mod
= NULL
;
3568 const struct btf_type
*t
;
3569 /* If a matching btf type is found in kernel or module BTFs, kptr_ref
3570 * is that BTF, otherwise it's program BTF
3572 struct btf
*kptr_btf
;
3576 /* Find type in map BTF, and use it to look up the matching type
3577 * in vmlinux or module BTFs, by name and kind.
3579 t
= btf_type_by_id(btf
, info
->kptr
.type_id
);
3580 id
= bpf_find_btf_id(__btf_name_by_offset(btf
, t
->name_off
), BTF_INFO_KIND(t
->info
),
3582 if (id
== -ENOENT
) {
3583 /* btf_parse_kptr should only be called w/ btf = program BTF */
3584 WARN_ON_ONCE(btf_is_kernel(btf
));
3586 /* Type exists only in program BTF. Assume that it's a MEM_ALLOC
3587 * kptr allocated via bpf_obj_new
3589 field
->kptr
.dtor
= NULL
;
3590 id
= info
->kptr
.type_id
;
3591 kptr_btf
= (struct btf
*)btf
;
3598 /* Find and stash the function pointer for the destruction function that
3599 * needs to be eventually invoked from the map free path.
3601 if (info
->type
== BPF_KPTR_REF
) {
3602 const struct btf_type
*dtor_func
;
3603 const char *dtor_func_name
;
3607 /* This call also serves as a whitelist of allowed objects that
3608 * can be used as a referenced pointer and be stored in a map at
3611 dtor_btf_id
= btf_find_dtor_kfunc(kptr_btf
, id
);
3612 if (dtor_btf_id
< 0) {
3617 dtor_func
= btf_type_by_id(kptr_btf
, dtor_btf_id
);
3623 if (btf_is_module(kptr_btf
)) {
3624 mod
= btf_try_get_module(kptr_btf
);
3631 /* We already verified dtor_func to be btf_type_is_func
3632 * in register_btf_id_dtor_kfuncs.
3634 dtor_func_name
= __btf_name_by_offset(kptr_btf
, dtor_func
->name_off
);
3635 addr
= kallsyms_lookup_name(dtor_func_name
);
3640 field
->kptr
.dtor
= (void *)addr
;
3644 field
->kptr
.btf_id
= id
;
3645 field
->kptr
.btf
= kptr_btf
;
3646 field
->kptr
.module
= mod
;
3655 static int btf_parse_graph_root(const struct btf
*btf
,
3656 struct btf_field
*field
,
3657 struct btf_field_info
*info
,
3658 const char *node_type_name
,
3659 size_t node_type_align
)
3661 const struct btf_type
*t
, *n
= NULL
;
3662 const struct btf_member
*member
;
3666 t
= btf_type_by_id(btf
, info
->graph_root
.value_btf_id
);
3667 /* We've already checked that value_btf_id is a struct type. We
3668 * just need to figure out the offset of the list_node, and
3671 for_each_member(i
, t
, member
) {
3672 if (strcmp(info
->graph_root
.node_name
,
3673 __btf_name_by_offset(btf
, member
->name_off
)))
3675 /* Invalid BTF, two members with same name */
3678 n
= btf_type_by_id(btf
, member
->type
);
3679 if (!__btf_type_is_struct(n
))
3681 if (strcmp(node_type_name
, __btf_name_by_offset(btf
, n
->name_off
)))
3683 offset
= __btf_member_bit_offset(n
, member
);
3687 if (offset
% node_type_align
)
3690 field
->graph_root
.btf
= (struct btf
*)btf
;
3691 field
->graph_root
.value_btf_id
= info
->graph_root
.value_btf_id
;
3692 field
->graph_root
.node_offset
= offset
;
static int btf_parse_list_head(const struct btf *btf, struct btf_field *field,
			       struct btf_field_info *info)
{
	return btf_parse_graph_root(btf, field, info, "bpf_list_node",
				    __alignof__(struct bpf_list_node));
}

static int btf_parse_rb_root(const struct btf *btf, struct btf_field *field,
			     struct btf_field_info *info)
{
	return btf_parse_graph_root(btf, field, info, "bpf_rb_node",
				    __alignof__(struct bpf_rb_node));
}
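/* The graph-root fields above are wired up from a BTF_KIND_DECL_TAG of the
 * form "contains:<value_struct>:<node_field>".  For example (illustrative
 * declaration, using the __contains() helper from the BPF selftests'
 * bpf_experimental.h):
 *
 *     struct elem { struct bpf_list_node node; ... };
 *     struct map_value {
 *             struct bpf_spin_lock lock;
 *             struct bpf_list_head head __contains(elem, node);
 *     };
 *
 * btf_find_graph_root() parses the tag to find the value struct and its node
 * field, and btf_parse_graph_root() records the node's byte offset.
 */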
static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
{
	const struct btf_field *a = (const struct btf_field *)_a;
	const struct btf_field *b = (const struct btf_field *)_b;

	if (a->offset < b->offset)
		return -1;
	else if (a->offset > b->offset)
		return 1;
	return 0;
}
*btf_parse_fields(const struct btf
*btf
, const struct btf_type
*t
,
3726 u32 field_mask
, u32 value_size
)
3728 struct btf_field_info info_arr
[BTF_FIELDS_MAX
];
3729 u32 next_off
= 0, field_type_size
;
3730 struct btf_record
*rec
;
3733 ret
= btf_find_field(btf
, t
, field_mask
, info_arr
, ARRAY_SIZE(info_arr
));
3735 return ERR_PTR(ret
);
3740 /* This needs to be kzalloc to zero out padding and unused fields, see
3741 * comment in btf_record_equal.
3743 rec
= kzalloc(offsetof(struct btf_record
, fields
[cnt
]), GFP_KERNEL
| __GFP_NOWARN
);
3745 return ERR_PTR(-ENOMEM
);
3747 rec
->spin_lock_off
= -EINVAL
;
3748 rec
->timer_off
= -EINVAL
;
3749 rec
->refcount_off
= -EINVAL
;
3750 for (i
= 0; i
< cnt
; i
++) {
3751 field_type_size
= btf_field_type_size(info_arr
[i
].type
);
3752 if (info_arr
[i
].off
+ field_type_size
> value_size
) {
3753 WARN_ONCE(1, "verifier bug off %d size %d", info_arr
[i
].off
, value_size
);
3757 if (info_arr
[i
].off
< next_off
) {
3761 next_off
= info_arr
[i
].off
+ field_type_size
;
3763 rec
->field_mask
|= info_arr
[i
].type
;
3764 rec
->fields
[i
].offset
= info_arr
[i
].off
;
3765 rec
->fields
[i
].type
= info_arr
[i
].type
;
3766 rec
->fields
[i
].size
= field_type_size
;
3768 switch (info_arr
[i
].type
) {
3770 WARN_ON_ONCE(rec
->spin_lock_off
>= 0);
3771 /* Cache offset for faster lookup at runtime */
3772 rec
->spin_lock_off
= rec
->fields
[i
].offset
;
3775 WARN_ON_ONCE(rec
->timer_off
>= 0);
3776 /* Cache offset for faster lookup at runtime */
3777 rec
->timer_off
= rec
->fields
[i
].offset
;
3780 WARN_ON_ONCE(rec
->refcount_off
>= 0);
3781 /* Cache offset for faster lookup at runtime */
3782 rec
->refcount_off
= rec
->fields
[i
].offset
;
3784 case BPF_KPTR_UNREF
:
3786 ret
= btf_parse_kptr(btf
, &rec
->fields
[i
], &info_arr
[i
]);
3791 ret
= btf_parse_list_head(btf
, &rec
->fields
[i
], &info_arr
[i
]);
3796 ret
= btf_parse_rb_root(btf
, &rec
->fields
[i
], &info_arr
[i
]);
3810 /* bpf_{list_head, rb_node} require bpf_spin_lock */
3811 if ((btf_record_has_field(rec
, BPF_LIST_HEAD
) ||
3812 btf_record_has_field(rec
, BPF_RB_ROOT
)) && rec
->spin_lock_off
< 0) {
3817 if (rec
->refcount_off
< 0 &&
3818 btf_record_has_field(rec
, BPF_LIST_NODE
) &&
3819 btf_record_has_field(rec
, BPF_RB_NODE
)) {
3824 sort_r(rec
->fields
, rec
->cnt
, sizeof(struct btf_field
), btf_field_cmp
,
3829 btf_record_free(rec
);
3830 return ERR_PTR(ret
);
#define GRAPH_ROOT_MASK (BPF_LIST_HEAD | BPF_RB_ROOT)
#define GRAPH_NODE_MASK (BPF_LIST_NODE | BPF_RB_NODE)
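/* GRAPH_ROOT_MASK groups the field kinds that can own other objects (list
 * heads and rbtree roots); GRAPH_NODE_MASK groups the kinds that can be
 * owned (list and rbtree nodes).  btf_check_and_fixup_fields() below uses
 * this distinction to keep the ownership graph acyclic.
 */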
3836 int btf_check_and_fixup_fields(const struct btf
*btf
, struct btf_record
*rec
)
3840 /* There are three types that signify ownership of some other type:
3841 * kptr_ref, bpf_list_head, bpf_rb_root.
3842 * kptr_ref only supports storing kernel types, which can't store
3843 * references to program allocated local types.
3845 * Hence we only need to ensure that bpf_{list_head,rb_root} ownership
3846 * does not form cycles.
3848 if (IS_ERR_OR_NULL(rec
) || !(rec
->field_mask
& GRAPH_ROOT_MASK
))
3850 for (i
= 0; i
< rec
->cnt
; i
++) {
3851 struct btf_struct_meta
*meta
;
3854 if (!(rec
->fields
[i
].type
& GRAPH_ROOT_MASK
))
3856 btf_id
= rec
->fields
[i
].graph_root
.value_btf_id
;
3857 meta
= btf_find_struct_meta(btf
, btf_id
);
3860 rec
->fields
[i
].graph_root
.value_rec
= meta
->record
;
3862 /* We need to set value_rec for all root types, but no need
3863 * to check ownership cycle for a type unless it's also a
3866 if (!(rec
->field_mask
& GRAPH_NODE_MASK
))
3869 /* We need to ensure ownership acyclicity among all types. The
3870 * proper way to do it would be to topologically sort all BTF
3871 * IDs based on the ownership edges, since there can be multiple
3872 * bpf_{list_head,rb_node} in a type. Instead, we use the
3873 * following resaoning:
3875 * - A type can only be owned by another type in user BTF if it
3876 * has a bpf_{list,rb}_node. Let's call these node types.
3877 * - A type can only _own_ another type in user BTF if it has a
3878 * bpf_{list_head,rb_root}. Let's call these root types.
3880 * We ensure that if a type is both a root and node, its
3881 * element types cannot be root types.
3883 * To ensure acyclicity:
3885 * When A is an root type but not a node, its ownership
3889 * - A is an root, e.g. has bpf_rb_root.
3890 * - B is both a root and node, e.g. has bpf_rb_node and
3892 * - C is only an root, e.g. has bpf_list_node
3894 * When A is both a root and node, some other type already
3895 * owns it in the BTF domain, hence it can not own
3896 * another root type through any of the ownership edges.
3899 * - A is both an root and node.
3900 * - B is only an node.
3902 if (meta
->record
->field_mask
& GRAPH_ROOT_MASK
)
3908 static void __btf_struct_show(const struct btf
*btf
, const struct btf_type
*t
,
3909 u32 type_id
, void *data
, u8 bits_offset
,
3910 struct btf_show
*show
)
3912 const struct btf_member
*member
;
3916 safe_data
= btf_show_start_struct_type(show
, t
, type_id
, data
);
3920 for_each_member(i
, t
, member
) {
3921 const struct btf_type
*member_type
= btf_type_by_id(btf
,
3923 const struct btf_kind_operations
*ops
;
3924 u32 member_offset
, bitfield_size
;
3928 btf_show_start_member(show
, member
);
3930 member_offset
= __btf_member_bit_offset(t
, member
);
3931 bitfield_size
= __btf_member_bitfield_size(t
, member
);
3932 bytes_offset
= BITS_ROUNDDOWN_BYTES(member_offset
);
3933 bits8_offset
= BITS_PER_BYTE_MASKED(member_offset
);
3934 if (bitfield_size
) {
3935 safe_data
= btf_show_start_type(show
, member_type
,
3937 data
+ bytes_offset
);
3939 btf_bitfield_show(safe_data
,
3941 bitfield_size
, show
);
3942 btf_show_end_type(show
);
3944 ops
= btf_type_ops(member_type
);
3945 ops
->show(btf
, member_type
, member
->type
,
3946 data
+ bytes_offset
, bits8_offset
, show
);
3949 btf_show_end_member(show
);
3952 btf_show_end_struct_type(show
);
3955 static void btf_struct_show(const struct btf
*btf
, const struct btf_type
*t
,
3956 u32 type_id
, void *data
, u8 bits_offset
,
3957 struct btf_show
*show
)
3959 const struct btf_member
*m
= show
->state
.member
;
3962 * First check if any members would be shown (are non-zero).
3963 * See comments above "struct btf_show" definition for more
3964 * details on how this works at a high-level.
3966 if (show
->state
.depth
> 0 && !(show
->flags
& BTF_SHOW_ZERO
)) {
3967 if (!show
->state
.depth_check
) {
3968 show
->state
.depth_check
= show
->state
.depth
+ 1;
3969 show
->state
.depth_to_show
= 0;
3971 __btf_struct_show(btf
, t
, type_id
, data
, bits_offset
, show
);
3972 /* Restore saved member data here */
3973 show
->state
.member
= m
;
3974 if (show
->state
.depth_check
!= show
->state
.depth
+ 1)
3976 show
->state
.depth_check
= 0;
3978 if (show
->state
.depth_to_show
<= show
->state
.depth
)
3981 * Reaching here indicates we have recursed and found
3982 * non-zero child values.
3986 __btf_struct_show(btf
, t
, type_id
, data
, bits_offset
, show
);
3989 static struct btf_kind_operations struct_ops
= {
3990 .check_meta
= btf_struct_check_meta
,
3991 .resolve
= btf_struct_resolve
,
3992 .check_member
= btf_struct_check_member
,
3993 .check_kflag_member
= btf_generic_check_kflag_member
,
3994 .log_details
= btf_struct_log
,
3995 .show
= btf_struct_show
,
3998 static int btf_enum_check_member(struct btf_verifier_env
*env
,
3999 const struct btf_type
*struct_type
,
4000 const struct btf_member
*member
,
4001 const struct btf_type
*member_type
)
4003 u32 struct_bits_off
= member
->offset
;
4004 u32 struct_size
, bytes_offset
;
4006 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
4007 btf_verifier_log_member(env
, struct_type
, member
,
4008 "Member is not byte aligned");
4012 struct_size
= struct_type
->size
;
4013 bytes_offset
= BITS_ROUNDDOWN_BYTES(struct_bits_off
);
4014 if (struct_size
- bytes_offset
< member_type
->size
) {
4015 btf_verifier_log_member(env
, struct_type
, member
,
4016 "Member exceeds struct_size");
4023 static int btf_enum_check_kflag_member(struct btf_verifier_env
*env
,
4024 const struct btf_type
*struct_type
,
4025 const struct btf_member
*member
,
4026 const struct btf_type
*member_type
)
4028 u32 struct_bits_off
, nr_bits
, bytes_end
, struct_size
;
4029 u32 int_bitsize
= sizeof(int) * BITS_PER_BYTE
;
4031 struct_bits_off
= BTF_MEMBER_BIT_OFFSET(member
->offset
);
4032 nr_bits
= BTF_MEMBER_BITFIELD_SIZE(member
->offset
);
4034 if (BITS_PER_BYTE_MASKED(struct_bits_off
)) {
4035 btf_verifier_log_member(env
, struct_type
, member
,
4036 "Member is not byte aligned");
4040 nr_bits
= int_bitsize
;
4041 } else if (nr_bits
> int_bitsize
) {
4042 btf_verifier_log_member(env
, struct_type
, member
,
4043 "Invalid member bitfield_size");
4047 struct_size
= struct_type
->size
;
4048 bytes_end
= BITS_ROUNDUP_BYTES(struct_bits_off
+ nr_bits
);
4049 if (struct_size
< bytes_end
) {
4050 btf_verifier_log_member(env
, struct_type
, member
,
4051 "Member exceeds struct_size");
4058 static s32
btf_enum_check_meta(struct btf_verifier_env
*env
,
4059 const struct btf_type
*t
,
4062 const struct btf_enum
*enums
= btf_type_enum(t
);
4063 struct btf
*btf
= env
->btf
;
4064 const char *fmt_str
;
4068 nr_enums
= btf_type_vlen(t
);
4069 meta_needed
= nr_enums
* sizeof(*enums
);
4071 if (meta_left
< meta_needed
) {
4072 btf_verifier_log_basic(env
, t
,
4073 "meta_left:%u meta_needed:%u",
4074 meta_left
, meta_needed
);
4078 if (t
->size
> 8 || !is_power_of_2(t
->size
)) {
4079 btf_verifier_log_type(env
, t
, "Unexpected size");
4083 /* enum type either no name or a valid one */
4085 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
4086 btf_verifier_log_type(env
, t
, "Invalid name");
4090 btf_verifier_log_type(env
, t
, NULL
);
4092 for (i
= 0; i
< nr_enums
; i
++) {
4093 if (!btf_name_offset_valid(btf
, enums
[i
].name_off
)) {
4094 btf_verifier_log(env
, "\tInvalid name_offset:%u",
4099 /* enum member must have a valid name */
4100 if (!enums
[i
].name_off
||
4101 !btf_name_valid_identifier(btf
, enums
[i
].name_off
)) {
4102 btf_verifier_log_type(env
, t
, "Invalid name");
4106 if (env
->log
.level
== BPF_LOG_KERNEL
)
4108 fmt_str
= btf_type_kflag(t
) ? "\t%s val=%d\n" : "\t%s val=%u\n";
4109 btf_verifier_log(env
, fmt_str
,
4110 __btf_name_by_offset(btf
, enums
[i
].name_off
),
4117 static void btf_enum_log(struct btf_verifier_env
*env
,
4118 const struct btf_type
*t
)
4120 btf_verifier_log(env
, "size=%u vlen=%u", t
->size
, btf_type_vlen(t
));
4123 static void btf_enum_show(const struct btf
*btf
, const struct btf_type
*t
,
4124 u32 type_id
, void *data
, u8 bits_offset
,
4125 struct btf_show
*show
)
4127 const struct btf_enum
*enums
= btf_type_enum(t
);
4128 u32 i
, nr_enums
= btf_type_vlen(t
);
4132 safe_data
= btf_show_start_type(show
, t
, type_id
, data
);
4136 v
= *(int *)safe_data
;
4138 for (i
= 0; i
< nr_enums
; i
++) {
4139 if (v
!= enums
[i
].val
)
4142 btf_show_type_value(show
, "%s",
4143 __btf_name_by_offset(btf
,
4144 enums
[i
].name_off
));
4146 btf_show_end_type(show
);
4150 if (btf_type_kflag(t
))
4151 btf_show_type_value(show
, "%d", v
);
4153 btf_show_type_value(show
, "%u", v
);
4154 btf_show_end_type(show
);
4157 static struct btf_kind_operations enum_ops
= {
4158 .check_meta
= btf_enum_check_meta
,
4159 .resolve
= btf_df_resolve
,
4160 .check_member
= btf_enum_check_member
,
4161 .check_kflag_member
= btf_enum_check_kflag_member
,
4162 .log_details
= btf_enum_log
,
4163 .show
= btf_enum_show
,
4166 static s32
btf_enum64_check_meta(struct btf_verifier_env
*env
,
4167 const struct btf_type
*t
,
4170 const struct btf_enum64
*enums
= btf_type_enum64(t
);
4171 struct btf
*btf
= env
->btf
;
4172 const char *fmt_str
;
4176 nr_enums
= btf_type_vlen(t
);
4177 meta_needed
= nr_enums
* sizeof(*enums
);
4179 if (meta_left
< meta_needed
) {
4180 btf_verifier_log_basic(env
, t
,
4181 "meta_left:%u meta_needed:%u",
4182 meta_left
, meta_needed
);
4186 if (t
->size
> 8 || !is_power_of_2(t
->size
)) {
4187 btf_verifier_log_type(env
, t
, "Unexpected size");
4191 /* enum type either no name or a valid one */
4193 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
4194 btf_verifier_log_type(env
, t
, "Invalid name");
4198 btf_verifier_log_type(env
, t
, NULL
);
4200 for (i
= 0; i
< nr_enums
; i
++) {
4201 if (!btf_name_offset_valid(btf
, enums
[i
].name_off
)) {
4202 btf_verifier_log(env
, "\tInvalid name_offset:%u",
4207 /* enum member must have a valid name */
4208 if (!enums
[i
].name_off
||
4209 !btf_name_valid_identifier(btf
, enums
[i
].name_off
)) {
4210 btf_verifier_log_type(env
, t
, "Invalid name");
4214 if (env
->log
.level
== BPF_LOG_KERNEL
)
4217 fmt_str
= btf_type_kflag(t
) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
4218 btf_verifier_log(env
, fmt_str
,
4219 __btf_name_by_offset(btf
, enums
[i
].name_off
),
4220 btf_enum64_value(enums
+ i
));
4226 static void btf_enum64_show(const struct btf
*btf
, const struct btf_type
*t
,
4227 u32 type_id
, void *data
, u8 bits_offset
,
4228 struct btf_show
*show
)
4230 const struct btf_enum64
*enums
= btf_type_enum64(t
);
4231 u32 i
, nr_enums
= btf_type_vlen(t
);
4235 safe_data
= btf_show_start_type(show
, t
, type_id
, data
);
4239 v
= *(u64
*)safe_data
;
4241 for (i
= 0; i
< nr_enums
; i
++) {
4242 if (v
!= btf_enum64_value(enums
+ i
))
4245 btf_show_type_value(show
, "%s",
4246 __btf_name_by_offset(btf
,
4247 enums
[i
].name_off
));
4249 btf_show_end_type(show
);
4253 if (btf_type_kflag(t
))
4254 btf_show_type_value(show
, "%lld", v
);
4256 btf_show_type_value(show
, "%llu", v
);
4257 btf_show_end_type(show
);
4260 static struct btf_kind_operations enum64_ops
= {
4261 .check_meta
= btf_enum64_check_meta
,
4262 .resolve
= btf_df_resolve
,
4263 .check_member
= btf_enum_check_member
,
4264 .check_kflag_member
= btf_enum_check_kflag_member
,
4265 .log_details
= btf_enum_log
,
4266 .show
= btf_enum64_show
,
4269 static s32
btf_func_proto_check_meta(struct btf_verifier_env
*env
,
4270 const struct btf_type
*t
,
4273 u32 meta_needed
= btf_type_vlen(t
) * sizeof(struct btf_param
);
4275 if (meta_left
< meta_needed
) {
4276 btf_verifier_log_basic(env
, t
,
4277 "meta_left:%u meta_needed:%u",
4278 meta_left
, meta_needed
);
4283 btf_verifier_log_type(env
, t
, "Invalid name");
4287 if (btf_type_kflag(t
)) {
4288 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
4292 btf_verifier_log_type(env
, t
, NULL
);
4297 static void btf_func_proto_log(struct btf_verifier_env
*env
,
4298 const struct btf_type
*t
)
4300 const struct btf_param
*args
= (const struct btf_param
*)(t
+ 1);
4301 u16 nr_args
= btf_type_vlen(t
), i
;
4303 btf_verifier_log(env
, "return=%u args=(", t
->type
);
4305 btf_verifier_log(env
, "void");
4309 if (nr_args
== 1 && !args
[0].type
) {
4310 /* Only one vararg */
4311 btf_verifier_log(env
, "vararg");
4315 btf_verifier_log(env
, "%u %s", args
[0].type
,
4316 __btf_name_by_offset(env
->btf
,
4318 for (i
= 1; i
< nr_args
- 1; i
++)
4319 btf_verifier_log(env
, ", %u %s", args
[i
].type
,
4320 __btf_name_by_offset(env
->btf
,
4324 const struct btf_param
*last_arg
= &args
[nr_args
- 1];
4327 btf_verifier_log(env
, ", %u %s", last_arg
->type
,
4328 __btf_name_by_offset(env
->btf
,
4329 last_arg
->name_off
));
4331 btf_verifier_log(env
, ", vararg");
4335 btf_verifier_log(env
, ")");
4338 static struct btf_kind_operations func_proto_ops
= {
4339 .check_meta
= btf_func_proto_check_meta
,
4340 .resolve
= btf_df_resolve
,
4342 * BTF_KIND_FUNC_PROTO cannot be directly referred by
4343 * a struct's member.
4345 * It should be a function pointer instead.
4346 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
4348 * Hence, there is no btf_func_check_member().
4350 .check_member
= btf_df_check_member
,
4351 .check_kflag_member
= btf_df_check_kflag_member
,
4352 .log_details
= btf_func_proto_log
,
4353 .show
= btf_df_show
,
4356 static s32
btf_func_check_meta(struct btf_verifier_env
*env
,
4357 const struct btf_type
*t
,
4361 !btf_name_valid_identifier(env
->btf
, t
->name_off
)) {
4362 btf_verifier_log_type(env
, t
, "Invalid name");
4366 if (btf_type_vlen(t
) > BTF_FUNC_GLOBAL
) {
4367 btf_verifier_log_type(env
, t
, "Invalid func linkage");
4371 if (btf_type_kflag(t
)) {
4372 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
4376 btf_verifier_log_type(env
, t
, NULL
);
4381 static int btf_func_resolve(struct btf_verifier_env
*env
,
4382 const struct resolve_vertex
*v
)
4384 const struct btf_type
*t
= v
->t
;
4385 u32 next_type_id
= t
->type
;
4388 err
= btf_func_check(env
, t
);
4392 env_stack_pop_resolved(env
, next_type_id
, 0);
4396 static struct btf_kind_operations func_ops
= {
4397 .check_meta
= btf_func_check_meta
,
4398 .resolve
= btf_func_resolve
,
4399 .check_member
= btf_df_check_member
,
4400 .check_kflag_member
= btf_df_check_kflag_member
,
4401 .log_details
= btf_ref_type_log
,
4402 .show
= btf_df_show
,
4405 static s32
btf_var_check_meta(struct btf_verifier_env
*env
,
4406 const struct btf_type
*t
,
4409 const struct btf_var
*var
;
4410 u32 meta_needed
= sizeof(*var
);
4412 if (meta_left
< meta_needed
) {
4413 btf_verifier_log_basic(env
, t
,
4414 "meta_left:%u meta_needed:%u",
4415 meta_left
, meta_needed
);
4419 if (btf_type_vlen(t
)) {
4420 btf_verifier_log_type(env
, t
, "vlen != 0");
4424 if (btf_type_kflag(t
)) {
4425 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
4430 !__btf_name_valid(env
->btf
, t
->name_off
)) {
4431 btf_verifier_log_type(env
, t
, "Invalid name");
4435 /* A var cannot be in type void */
4436 if (!t
->type
|| !BTF_TYPE_ID_VALID(t
->type
)) {
4437 btf_verifier_log_type(env
, t
, "Invalid type_id");
4441 var
= btf_type_var(t
);
4442 if (var
->linkage
!= BTF_VAR_STATIC
&&
4443 var
->linkage
!= BTF_VAR_GLOBAL_ALLOCATED
) {
4444 btf_verifier_log_type(env
, t
, "Linkage not supported");
4448 btf_verifier_log_type(env
, t
, NULL
);
4453 static void btf_var_log(struct btf_verifier_env
*env
, const struct btf_type
*t
)
4455 const struct btf_var
*var
= btf_type_var(t
);
4457 btf_verifier_log(env
, "type_id=%u linkage=%u", t
->type
, var
->linkage
);
4460 static const struct btf_kind_operations var_ops
= {
4461 .check_meta
= btf_var_check_meta
,
4462 .resolve
= btf_var_resolve
,
4463 .check_member
= btf_df_check_member
,
4464 .check_kflag_member
= btf_df_check_kflag_member
,
4465 .log_details
= btf_var_log
,
4466 .show
= btf_var_show
,
4469 static s32
btf_datasec_check_meta(struct btf_verifier_env
*env
,
4470 const struct btf_type
*t
,
4473 const struct btf_var_secinfo
*vsi
;
4474 u64 last_vsi_end_off
= 0, sum
= 0;
4477 meta_needed
= btf_type_vlen(t
) * sizeof(*vsi
);
4478 if (meta_left
< meta_needed
) {
4479 btf_verifier_log_basic(env
, t
,
4480 "meta_left:%u meta_needed:%u",
4481 meta_left
, meta_needed
);
4486 btf_verifier_log_type(env
, t
, "size == 0");
4490 if (btf_type_kflag(t
)) {
4491 btf_verifier_log_type(env
, t
, "Invalid btf_info kind_flag");
4496 !btf_name_valid_section(env
->btf
, t
->name_off
)) {
4497 btf_verifier_log_type(env
, t
, "Invalid name");
4501 btf_verifier_log_type(env
, t
, NULL
);
4503 for_each_vsi(i
, t
, vsi
) {
4504 /* A var cannot be in type void */
4505 if (!vsi
->type
|| !BTF_TYPE_ID_VALID(vsi
->type
)) {
4506 btf_verifier_log_vsi(env
, t
, vsi
,
4511 if (vsi
->offset
< last_vsi_end_off
|| vsi
->offset
>= t
->size
) {
4512 btf_verifier_log_vsi(env
, t
, vsi
,
4517 if (!vsi
->size
|| vsi
->size
> t
->size
) {
4518 btf_verifier_log_vsi(env
, t
, vsi
,
4523 last_vsi_end_off
= vsi
->offset
+ vsi
->size
;
4524 if (last_vsi_end_off
> t
->size
) {
4525 btf_verifier_log_vsi(env
, t
, vsi
,
4526 "Invalid offset+size");
4530 btf_verifier_log_vsi(env
, t
, vsi
, NULL
);
4534 if (t
->size
< sum
) {
4535 btf_verifier_log_type(env
, t
, "Invalid btf_info size");
4542 static int btf_datasec_resolve(struct btf_verifier_env
*env
,
4543 const struct resolve_vertex
*v
)
4545 const struct btf_var_secinfo
*vsi
;
4546 struct btf
*btf
= env
->btf
;
4549 env
->resolve_mode
= RESOLVE_TBD
;
4550 for_each_vsi_from(i
, v
->next_member
, v
->t
, vsi
) {
4551 u32 var_type_id
= vsi
->type
, type_id
, type_size
= 0;
4552 const struct btf_type
*var_type
= btf_type_by_id(env
->btf
,
4554 if (!var_type
|| !btf_type_is_var(var_type
)) {
4555 btf_verifier_log_vsi(env
, v
->t
, vsi
,
4556 "Not a VAR kind member");
4560 if (!env_type_is_resolve_sink(env
, var_type
) &&
4561 !env_type_is_resolved(env
, var_type_id
)) {
4562 env_stack_set_next_member(env
, i
+ 1);
4563 return env_stack_push(env
, var_type
, var_type_id
);
4566 type_id
= var_type
->type
;
4567 if (!btf_type_id_size(btf
, &type_id
, &type_size
)) {
4568 btf_verifier_log_vsi(env
, v
->t
, vsi
, "Invalid type");
4572 if (vsi
->size
< type_size
) {
4573 btf_verifier_log_vsi(env
, v
->t
, vsi
, "Invalid size");
4578 env_stack_pop_resolved(env
, 0, 0);
static void btf_datasec_log(struct btf_verifier_env *env,
			    const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_datasec_show(const struct btf *btf,
			     const struct btf_type *t, u32 type_id,
			     void *data, u8 bits_offset,
			     struct btf_show *show)
{
	const struct btf_var_secinfo *vsi;
	const struct btf_type *var;
	u32 i;

	if (!btf_show_start_type(show, t, type_id, data))
		return;

	btf_show_type_value(show, "section (\"%s\") = {",
			    __btf_name_by_offset(btf, t->name_off));
	for_each_vsi(i, t, vsi) {
		var = btf_type_by_id(btf, vsi->type);
		if (i)
			btf_show(show, ",");
		btf_type_ops(var)->show(btf, var, vsi->type,
					data + vsi->offset, bits_offset, show);
	}
	btf_show_end_type(show);
}

static const struct btf_kind_operations datasec_ops = {
	.check_meta		= btf_datasec_check_meta,
	.resolve		= btf_datasec_resolve,
	.check_member		= btf_df_check_member,
	.check_kflag_member	= btf_df_check_kflag_member,
	.log_details		= btf_datasec_log,
	.show			= btf_datasec_show,
};
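/* BTF_KIND_FLOAT has no extra metadata; only the sizes a compiler can
 * emit for floating point types (2, 4, 8, 12 or 16 bytes) are accepted.
 */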
static s32 btf_float_check_meta(struct btf_verifier_env *env,
				const struct btf_type *t,
				u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (t->size != 2 && t->size != 4 && t->size != 8 && t->size != 12 &&
	    t->size != 16) {
		btf_verifier_log_type(env, t, "Invalid type_size");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}
static int btf_float_check_member(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type)
{
	u64 start_offset_bytes;
	u64 end_offset_bytes;
	u64 misalign_bits;
	u64 align_bytes;
	u64 align_bits;

	/* Different architectures have different alignment requirements, so
	 * here we check only for the reasonable minimum. This way we ensure
	 * that types after CO-RE can pass the kernel BTF verifier.
	 */
	align_bytes = min_t(u64, sizeof(void *), member_type->size);
	align_bits = align_bytes * BITS_PER_BYTE;
	div64_u64_rem(member->offset, align_bits, &misalign_bits);
	if (misalign_bits) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not properly aligned");
		return -EINVAL;
	}

	start_offset_bytes = member->offset / BITS_PER_BYTE;
	end_offset_bytes = start_offset_bytes + member_type->size;
	if (end_offset_bytes > struct_type->size) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}
static void btf_float_log(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u", t->size);
}

static const struct btf_kind_operations float_ops = {
	.check_meta		= btf_float_check_meta,
	.resolve		= btf_df_resolve,
	.check_member		= btf_float_check_member,
	.check_kflag_member	= btf_generic_check_kflag_member,
	.log_details		= btf_float_log,
	.show			= btf_df_show,
};
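/* A BTF_KIND_DECL_TAG entry carries a 'struct btf_decl_tag' trailer whose
 * component_idx is either -1 (the tag applies to the target as a whole) or
 * the index of the struct member / function argument being tagged.
 */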
static s32 btf_decl_tag_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	const struct btf_decl_tag *tag;
	u32 meta_needed = sizeof(*tag);
	s32 component_idx;
	const char *value;

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	value = btf_name_by_offset(env->btf, t->name_off);
	if (!value || !value[0]) {
		btf_verifier_log_type(env, t, "Invalid value");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	component_idx = btf_type_decl_tag(t)->component_idx;
	if (component_idx < -1) {
		btf_verifier_log_type(env, t, "Invalid component_idx");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}
4739 static int btf_decl_tag_resolve(struct btf_verifier_env
*env
,
4740 const struct resolve_vertex
*v
)
4742 const struct btf_type
*next_type
;
4743 const struct btf_type
*t
= v
->t
;
4744 u32 next_type_id
= t
->type
;
4745 struct btf
*btf
= env
->btf
;
4749 next_type
= btf_type_by_id(btf
, next_type_id
);
4750 if (!next_type
|| !btf_type_is_decl_tag_target(next_type
)) {
4751 btf_verifier_log_type(env
, v
->t
, "Invalid type_id");
4755 if (!env_type_is_resolve_sink(env
, next_type
) &&
4756 !env_type_is_resolved(env
, next_type_id
))
4757 return env_stack_push(env
, next_type
, next_type_id
);
4759 component_idx
= btf_type_decl_tag(t
)->component_idx
;
4760 if (component_idx
!= -1) {
4761 if (btf_type_is_var(next_type
) || btf_type_is_typedef(next_type
)) {
4762 btf_verifier_log_type(env
, v
->t
, "Invalid component_idx");
4766 if (btf_type_is_struct(next_type
)) {
4767 vlen
= btf_type_vlen(next_type
);
4769 /* next_type should be a function */
4770 next_type
= btf_type_by_id(btf
, next_type
->type
);
4771 vlen
= btf_type_vlen(next_type
);
4774 if ((u32
)component_idx
>= vlen
) {
4775 btf_verifier_log_type(env
, v
->t
, "Invalid component_idx");
4780 env_stack_pop_resolved(env
, next_type_id
, 0);
static void btf_decl_tag_log(struct btf_verifier_env *env, const struct btf_type *t)
{
	btf_verifier_log(env, "type=%u component_idx=%d", t->type,
			 btf_type_decl_tag(t)->component_idx);
}

static const struct btf_kind_operations decl_tag_ops = {
	.check_meta		= btf_decl_tag_check_meta,
	.resolve		= btf_decl_tag_resolve,
	.check_member		= btf_df_check_member,
	.check_kflag_member	= btf_df_check_kflag_member,
	.log_details		= btf_decl_tag_log,
	.show			= btf_df_show,
};
4800 static int btf_func_proto_check(struct btf_verifier_env
*env
,
4801 const struct btf_type
*t
)
4803 const struct btf_type
*ret_type
;
4804 const struct btf_param
*args
;
4805 const struct btf
*btf
;
4810 args
= (const struct btf_param
*)(t
+ 1);
4811 nr_args
= btf_type_vlen(t
);
4813 /* Check func return type which could be "void" (t->type == 0) */
4815 u32 ret_type_id
= t
->type
;
4817 ret_type
= btf_type_by_id(btf
, ret_type_id
);
4819 btf_verifier_log_type(env
, t
, "Invalid return type");
4823 if (btf_type_is_resolve_source_only(ret_type
)) {
4824 btf_verifier_log_type(env
, t
, "Invalid return type");
4828 if (btf_type_needs_resolve(ret_type
) &&
4829 !env_type_is_resolved(env
, ret_type_id
)) {
4830 err
= btf_resolve(env
, ret_type
, ret_type_id
);
4835 /* Ensure the return type is a type that has a size */
4836 if (!btf_type_id_size(btf
, &ret_type_id
, NULL
)) {
4837 btf_verifier_log_type(env
, t
, "Invalid return type");
4845 /* Last func arg type_id could be 0 if it is a vararg */
4846 if (!args
[nr_args
- 1].type
) {
4847 if (args
[nr_args
- 1].name_off
) {
4848 btf_verifier_log_type(env
, t
, "Invalid arg#%u",
4855 for (i
= 0; i
< nr_args
; i
++) {
4856 const struct btf_type
*arg_type
;
4859 arg_type_id
= args
[i
].type
;
4860 arg_type
= btf_type_by_id(btf
, arg_type_id
);
4862 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
4866 if (btf_type_is_resolve_source_only(arg_type
)) {
4867 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
4871 if (args
[i
].name_off
&&
4872 (!btf_name_offset_valid(btf
, args
[i
].name_off
) ||
4873 !btf_name_valid_identifier(btf
, args
[i
].name_off
))) {
4874 btf_verifier_log_type(env
, t
,
4875 "Invalid arg#%u", i
+ 1);
4879 if (btf_type_needs_resolve(arg_type
) &&
4880 !env_type_is_resolved(env
, arg_type_id
)) {
4881 err
= btf_resolve(env
, arg_type
, arg_type_id
);
4886 if (!btf_type_id_size(btf
, &arg_type_id
, NULL
)) {
4887 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
4895 static int btf_func_check(struct btf_verifier_env
*env
,
4896 const struct btf_type
*t
)
4898 const struct btf_type
*proto_type
;
4899 const struct btf_param
*args
;
4900 const struct btf
*btf
;
4904 proto_type
= btf_type_by_id(btf
, t
->type
);
4906 if (!proto_type
|| !btf_type_is_func_proto(proto_type
)) {
4907 btf_verifier_log_type(env
, t
, "Invalid type_id");
4911 args
= (const struct btf_param
*)(proto_type
+ 1);
4912 nr_args
= btf_type_vlen(proto_type
);
4913 for (i
= 0; i
< nr_args
; i
++) {
4914 if (!args
[i
].name_off
&& args
[i
].type
) {
4915 btf_verifier_log_type(env
, t
, "Invalid arg#%u", i
+ 1);
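/* Dispatch table with one set of verifier/show callbacks per BTF kind.
 * Kinds with identical handling share an ops struct: typedef, const,
 * volatile, restrict and type_tag all use modifier_ops, and unions
 * reuse struct_ops.
 */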
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
	[BTF_KIND_VAR] = &var_ops,
	[BTF_KIND_DATASEC] = &datasec_ops,
	[BTF_KIND_FLOAT] = &float_ops,
	[BTF_KIND_DECL_TAG] = &decl_tag_ops,
	[BTF_KIND_TYPE_TAG] = &modifier_ops,
	[BTF_KIND_ENUM64] = &enum64_ops,
};
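/* Validate the common 'struct btf_type' header of one entry, then let the
 * kind-specific ->check_meta() validate the trailing data. On success the
 * return value is the number of bytes the whole entry consumed, e.g. for a
 * VAR entry sizeof(struct btf_type) + sizeof(struct btf_var), which is how
 * btf_check_all_metas() steps to the next entry.
 */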
static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}
static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = btf->base_btf ? btf->start_id : 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}
5014 static bool btf_resolve_valid(struct btf_verifier_env
*env
,
5015 const struct btf_type
*t
,
5018 struct btf
*btf
= env
->btf
;
5020 if (!env_type_is_resolved(env
, type_id
))
5023 if (btf_type_is_struct(t
) || btf_type_is_datasec(t
))
5024 return !btf_resolved_type_id(btf
, type_id
) &&
5025 !btf_resolved_type_size(btf
, type_id
);
5027 if (btf_type_is_decl_tag(t
) || btf_type_is_func(t
))
5028 return btf_resolved_type_id(btf
, type_id
) &&
5029 !btf_resolved_type_size(btf
, type_id
);
5031 if (btf_type_is_modifier(t
) || btf_type_is_ptr(t
) ||
5032 btf_type_is_var(t
)) {
5033 t
= btf_type_id_resolve(btf
, &type_id
);
5035 !btf_type_is_modifier(t
) &&
5036 !btf_type_is_var(t
) &&
5037 !btf_type_is_datasec(t
);
5040 if (btf_type_is_array(t
)) {
5041 const struct btf_array
*array
= btf_type_array(t
);
5042 const struct btf_type
*elem_type
;
5043 u32 elem_type_id
= array
->type
;
5046 elem_type
= btf_type_id_size(btf
, &elem_type_id
, &elem_size
);
5047 return elem_type
&& !btf_type_is_modifier(elem_type
) &&
5048 (array
->nelems
* elem_size
==
5049 btf_resolved_type_size(btf
, type_id
));
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}
5088 static int btf_check_all_types(struct btf_verifier_env
*env
)
5090 struct btf
*btf
= env
->btf
;
5091 const struct btf_type
*t
;
5095 err
= env_resolve_init(env
);
5100 for (i
= btf
->base_btf
? 0 : 1; i
< btf
->nr_types
; i
++) {
5101 type_id
= btf
->start_id
+ i
;
5102 t
= btf_type_by_id(btf
, type_id
);
5104 env
->log_type_id
= type_id
;
5105 if (btf_type_needs_resolve(t
) &&
5106 !env_type_is_resolved(env
, type_id
)) {
5107 err
= btf_resolve(env
, t
, type_id
);
5112 if (btf_type_is_func_proto(t
)) {
5113 err
= btf_func_proto_check(env
, t
);
static int btf_parse_type_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr = &env->btf->hdr;
	int err;

	/* Type section must align to 4 bytes */
	if (hdr->type_off & (sizeof(u32) - 1)) {
		btf_verifier_log(env, "Unaligned type_off");
		return -EINVAL;
	}

	if (!env->btf->base_btf && !hdr->type_len) {
		btf_verifier_log(env, "No type found");
		return -EINVAL;
	}

	err = btf_check_all_metas(env);
	if (err)
		return err;

	return btf_check_all_types(env);
}
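/* The string section must be the last section of the BTF blob: it starts
 * with '\0' (the anonymous name), every string is NUL terminated, and for
 * split BTF (base_btf != NULL) an empty string section is allowed.
 */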
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	btf->strings = start;

	if (btf->base_btf && !hdr->str_len)
		return 0;
	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}
	if (!btf->base_btf && start[0]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	return 0;
}
static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}
5189 static int btf_check_sec_info(struct btf_verifier_env
*env
,
5192 struct btf_sec_info secs
[ARRAY_SIZE(btf_sec_info_offset
)];
5193 u32 total
, expected_total
, i
;
5194 const struct btf_header
*hdr
;
5195 const struct btf
*btf
;
5200 /* Populate the secs from hdr */
5201 for (i
= 0; i
< ARRAY_SIZE(btf_sec_info_offset
); i
++)
5202 secs
[i
] = *(struct btf_sec_info
*)((void *)hdr
+
5203 btf_sec_info_offset
[i
]);
5205 sort(secs
, ARRAY_SIZE(btf_sec_info_offset
),
5206 sizeof(struct btf_sec_info
), btf_sec_info_cmp
, NULL
);
5208 /* Check for gaps and overlap among sections */
5210 expected_total
= btf_data_size
- hdr
->hdr_len
;
5211 for (i
= 0; i
< ARRAY_SIZE(btf_sec_info_offset
); i
++) {
5212 if (expected_total
< secs
[i
].off
) {
5213 btf_verifier_log(env
, "Invalid section offset");
5216 if (total
< secs
[i
].off
) {
5218 btf_verifier_log(env
, "Unsupported section found");
5221 if (total
> secs
[i
].off
) {
5222 btf_verifier_log(env
, "Section overlap found");
5225 if (expected_total
- total
< secs
[i
].len
) {
5226 btf_verifier_log(env
,
5227 "Total section length too long");
5230 total
+= secs
[i
].len
;
5233 /* There is data other than hdr and known sections */
5234 if (expected_total
!= total
) {
5235 btf_verifier_log(env
, "Unsupported section found");
5242 static int btf_parse_hdr(struct btf_verifier_env
*env
)
5244 u32 hdr_len
, hdr_copy
, btf_data_size
;
5245 const struct btf_header
*hdr
;
5249 btf_data_size
= btf
->data_size
;
5251 if (btf_data_size
< offsetofend(struct btf_header
, hdr_len
)) {
5252 btf_verifier_log(env
, "hdr_len not found");
5257 hdr_len
= hdr
->hdr_len
;
5258 if (btf_data_size
< hdr_len
) {
5259 btf_verifier_log(env
, "btf_header not found");
5263 /* Ensure the unsupported header fields are zero */
5264 if (hdr_len
> sizeof(btf
->hdr
)) {
5265 u8
*expected_zero
= btf
->data
+ sizeof(btf
->hdr
);
5266 u8
*end
= btf
->data
+ hdr_len
;
5268 for (; expected_zero
< end
; expected_zero
++) {
5269 if (*expected_zero
) {
5270 btf_verifier_log(env
, "Unsupported btf_header");
5276 hdr_copy
= min_t(u32
, hdr_len
, sizeof(btf
->hdr
));
5277 memcpy(&btf
->hdr
, btf
->data
, hdr_copy
);
5281 btf_verifier_log_hdr(env
, btf_data_size
);
5283 if (hdr
->magic
!= BTF_MAGIC
) {
5284 btf_verifier_log(env
, "Invalid magic");
5288 if (hdr
->version
!= BTF_VERSION
) {
5289 btf_verifier_log(env
, "Unsupported version");
5294 btf_verifier_log(env
, "Unsupported flags");
5298 if (!btf
->base_btf
&& btf_data_size
== hdr
->hdr_len
) {
5299 btf_verifier_log(env
, "No data");
5303 return btf_check_sec_info(env
, btf_data_size
);
5306 static const char *alloc_obj_fields
[] = {
5315 static struct btf_struct_metas
*
5316 btf_parse_struct_metas(struct bpf_verifier_log
*log
, struct btf
*btf
)
5319 struct btf_id_set set
;
5322 u32 _ids
[ARRAY_SIZE(alloc_obj_fields
)];
5325 struct btf_struct_metas
*tab
= NULL
;
5328 BUILD_BUG_ON(offsetof(struct btf_id_set
, cnt
) != 0);
5329 BUILD_BUG_ON(sizeof(struct btf_id_set
) != sizeof(u32
));
5331 memset(&aof
, 0, sizeof(aof
));
5332 for (i
= 0; i
< ARRAY_SIZE(alloc_obj_fields
); i
++) {
5333 /* Try to find whether this special type exists in user BTF, and
5334 * if so remember its ID so we can easily find it among members
5335 * of structs that we iterate in the next loop.
5337 id
= btf_find_by_name_kind(btf
, alloc_obj_fields
[i
], BTF_KIND_STRUCT
);
5340 aof
.set
.ids
[aof
.set
.cnt
++] = id
;
5345 sort(&aof
.set
.ids
, aof
.set
.cnt
, sizeof(aof
.set
.ids
[0]), btf_id_cmp_func
, NULL
);
5347 n
= btf_nr_types(btf
);
5348 for (i
= 1; i
< n
; i
++) {
5349 struct btf_struct_metas
*new_tab
;
5350 const struct btf_member
*member
;
5351 struct btf_struct_meta
*type
;
5352 struct btf_record
*record
;
5353 const struct btf_type
*t
;
5356 t
= btf_type_by_id(btf
, i
);
5361 if (!__btf_type_is_struct(t
))
5366 for_each_member(j
, t
, member
) {
5367 if (btf_id_set_contains(&aof
.set
, member
->type
))
5372 tab_cnt
= tab
? tab
->cnt
: 0;
5373 new_tab
= krealloc(tab
, offsetof(struct btf_struct_metas
, types
[tab_cnt
+ 1]),
5374 GFP_KERNEL
| __GFP_NOWARN
);
5383 type
= &tab
->types
[tab
->cnt
];
5385 record
= btf_parse_fields(btf
, t
, BPF_SPIN_LOCK
| BPF_LIST_HEAD
| BPF_LIST_NODE
|
5386 BPF_RB_ROOT
| BPF_RB_NODE
| BPF_REFCOUNT
, t
->size
);
5387 /* The record cannot be unset, treat it as an error if so */
5388 if (IS_ERR_OR_NULL(record
)) {
5389 ret
= PTR_ERR_OR_ZERO(record
) ?: -EFAULT
;
5392 type
->record
= record
;
5397 btf_struct_metas_free(tab
);
5398 return ERR_PTR(ret
);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id)
{
	struct btf_struct_metas *tab;

	BUILD_BUG_ON(offsetof(struct btf_struct_meta, btf_id) != 0);
	tab = btf->struct_meta_tab;
	if (!tab)
		return NULL;
	return bsearch(&btf_id, tab->types, tab->cnt, sizeof(tab->types[0]), btf_id_cmp_func);
}
5412 static int btf_check_type_tags(struct btf_verifier_env
*env
,
5413 struct btf
*btf
, int start_id
)
5415 int i
, n
, good_id
= start_id
- 1;
5418 n
= btf_nr_types(btf
);
5419 for (i
= start_id
; i
< n
; i
++) {
5420 const struct btf_type
*t
;
5421 int chain_limit
= 32;
5424 t
= btf_type_by_id(btf
, i
);
5427 if (!btf_type_is_modifier(t
))
5432 in_tags
= btf_type_is_type_tag(t
);
5433 while (btf_type_is_modifier(t
)) {
5434 if (!chain_limit
--) {
5435 btf_verifier_log(env
, "Max chain length or cycle detected");
5438 if (btf_type_is_type_tag(t
)) {
5440 btf_verifier_log(env
, "Type tags don't precede modifiers");
5443 } else if (in_tags
) {
5446 if (cur_id
<= good_id
)
5448 /* Move to next type */
5450 t
= btf_type_by_id(btf
, cur_id
);
static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
{
	u32 log_true_size;
	int err;

	err = bpf_vlog_finalize(log, &log_true_size);

	if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
	    copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
				  &log_true_size, sizeof(log_true_size)))
		err = -EFAULT;

	return err;
}
5474 static struct btf
*btf_parse(const union bpf_attr
*attr
, bpfptr_t uattr
, u32 uattr_size
)
5476 bpfptr_t btf_data
= make_bpfptr(attr
->btf
, uattr
.is_kernel
);
5477 char __user
*log_ubuf
= u64_to_user_ptr(attr
->btf_log_buf
);
5478 struct btf_struct_metas
*struct_meta_tab
;
5479 struct btf_verifier_env
*env
= NULL
;
5480 struct btf
*btf
= NULL
;
5484 if (attr
->btf_size
> BTF_MAX_SIZE
)
5485 return ERR_PTR(-E2BIG
);
5487 env
= kzalloc(sizeof(*env
), GFP_KERNEL
| __GFP_NOWARN
);
5489 return ERR_PTR(-ENOMEM
);
5491 /* user could have requested verbose verifier output
5492 * and supplied buffer to store the verification trace
5494 err
= bpf_vlog_init(&env
->log
, attr
->btf_log_level
,
5495 log_ubuf
, attr
->btf_log_size
);
5499 btf
= kzalloc(sizeof(*btf
), GFP_KERNEL
| __GFP_NOWARN
);
5506 data
= kvmalloc(attr
->btf_size
, GFP_KERNEL
| __GFP_NOWARN
);
5513 btf
->data_size
= attr
->btf_size
;
5515 if (copy_from_bpfptr(data
, btf_data
, attr
->btf_size
)) {
5520 err
= btf_parse_hdr(env
);
5524 btf
->nohdr_data
= btf
->data
+ btf
->hdr
.hdr_len
;
5526 err
= btf_parse_str_sec(env
);
5530 err
= btf_parse_type_sec(env
);
5534 err
= btf_check_type_tags(env
, btf
, 1);
5538 struct_meta_tab
= btf_parse_struct_metas(&env
->log
, btf
);
5539 if (IS_ERR(struct_meta_tab
)) {
5540 err
= PTR_ERR(struct_meta_tab
);
5543 btf
->struct_meta_tab
= struct_meta_tab
;
5545 if (struct_meta_tab
) {
5548 for (i
= 0; i
< struct_meta_tab
->cnt
; i
++) {
5549 err
= btf_check_and_fixup_fields(btf
, struct_meta_tab
->types
[i
].record
);
5555 err
= finalize_log(&env
->log
, uattr
, uattr_size
);
5559 btf_verifier_env_free(env
);
5560 refcount_set(&btf
->refcnt
, 1);
5564 btf_free_struct_meta_tab(btf
);
5566 /* overwrite err with -ENOSPC or -EFAULT */
5567 ret
= finalize_log(&env
->log
, uattr
, uattr_size
);
5571 btf_verifier_env_free(env
);
5574 return ERR_PTR(err
);
5577 extern char __weak __start_BTF
[];
5578 extern char __weak __stop_BTF
[];
5579 extern struct btf
*btf_vmlinux
;
5581 #define BPF_MAP_TYPE(_id, _ops)
5582 #define BPF_LINK_TYPE(_id, _name)
5584 struct bpf_ctx_convert
{
5585 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5586 prog_ctx_type _id##_prog; \
5587 kern_ctx_type _id##_kern;
5588 #include <linux/bpf_types.h>
5589 #undef BPF_PROG_TYPE
5591 /* 't' is written once under lock. Read many times. */
5592 const struct btf_type
*t
;
5595 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5597 #include <linux/bpf_types.h>
5598 #undef BPF_PROG_TYPE
5599 __ctx_convert_unused
, /* to avoid empty enum in extreme .config */
5601 static u8 bpf_ctx_convert_map
[] = {
5602 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
5603 [_id] = __ctx_convert##_id,
5604 #include <linux/bpf_types.h>
5605 #undef BPF_PROG_TYPE
5606 0, /* avoid empty array */
5609 #undef BPF_LINK_TYPE
5611 const struct btf_member
*
5612 btf_get_prog_ctx_type(struct bpf_verifier_log
*log
, const struct btf
*btf
,
5613 const struct btf_type
*t
, enum bpf_prog_type prog_type
,
5616 const struct btf_type
*conv_struct
;
5617 const struct btf_type
*ctx_struct
;
5618 const struct btf_member
*ctx_type
;
5619 const char *tname
, *ctx_tname
;
5621 conv_struct
= bpf_ctx_convert
.t
;
5623 bpf_log(log
, "btf_vmlinux is malformed\n");
5626 t
= btf_type_by_id(btf
, t
->type
);
5627 while (btf_type_is_modifier(t
))
5628 t
= btf_type_by_id(btf
, t
->type
);
5629 if (!btf_type_is_struct(t
)) {
5630 /* Only pointer to struct is supported for now.
5631 * That means that BPF_PROG_TYPE_TRACEPOINT with BTF
5632 * is not supported yet.
5633 * BPF_PROG_TYPE_RAW_TRACEPOINT is fine.
5637 tname
= btf_name_by_offset(btf
, t
->name_off
);
5639 bpf_log(log
, "arg#%d struct doesn't have a name\n", arg
);
5642 /* prog_type is valid bpf program type. No need for bounds check. */
5643 ctx_type
= btf_type_member(conv_struct
) + bpf_ctx_convert_map
[prog_type
] * 2;
5644 /* ctx_struct is a pointer to prog_ctx_type in vmlinux.
5645 * Like 'struct __sk_buff'
5647 ctx_struct
= btf_type_by_id(btf_vmlinux
, ctx_type
->type
);
5649 /* should not happen */
5652 ctx_tname
= btf_name_by_offset(btf_vmlinux
, ctx_struct
->name_off
);
5654 /* should not happen */
5655 bpf_log(log
, "Please fix kernel include/linux/bpf_types.h\n");
5658 /* only compare that prog's ctx type name is the same as
5659 * kernel expects. No need to compare field by field.
5660 * It's ok for bpf prog to do:
5661 * struct __sk_buff {};
5662 * int socket_filter_bpf_prog(struct __sk_buff *skb)
5663 * { // no fields of skb are ever used }
5665 if (strcmp(ctx_tname
, "__sk_buff") == 0 && strcmp(tname
, "sk_buff") == 0)
5667 if (strcmp(ctx_tname
, "xdp_md") == 0 && strcmp(tname
, "xdp_buff") == 0)
5669 if (strcmp(ctx_tname
, tname
)) {
5670 /* bpf_user_pt_regs_t is a typedef, so resolve it to
5671 * underlying struct and check name again
5673 if (!btf_type_is_modifier(ctx_struct
))
5675 while (btf_type_is_modifier(ctx_struct
))
5676 ctx_struct
= btf_type_by_id(btf_vmlinux
, ctx_struct
->type
);
5682 static int btf_translate_to_vmlinux(struct bpf_verifier_log
*log
,
5684 const struct btf_type
*t
,
5685 enum bpf_prog_type prog_type
,
5688 const struct btf_member
*prog_ctx_type
, *kern_ctx_type
;
5690 prog_ctx_type
= btf_get_prog_ctx_type(log
, btf
, t
, prog_type
, arg
);
5693 kern_ctx_type
= prog_ctx_type
+ 1;
5694 return kern_ctx_type
->type
;
5697 int get_kern_ctx_btf_id(struct bpf_verifier_log
*log
, enum bpf_prog_type prog_type
)
5699 const struct btf_member
*kctx_member
;
5700 const struct btf_type
*conv_struct
;
5701 const struct btf_type
*kctx_type
;
5704 conv_struct
= bpf_ctx_convert
.t
;
5705 /* get member for kernel ctx type */
5706 kctx_member
= btf_type_member(conv_struct
) + bpf_ctx_convert_map
[prog_type
] * 2 + 1;
5707 kctx_type_id
= kctx_member
->type
;
5708 kctx_type
= btf_type_by_id(btf_vmlinux
, kctx_type_id
);
5709 if (!btf_type_is_struct(kctx_type
)) {
5710 bpf_log(log
, "kern ctx type id %u is not a struct\n", kctx_type_id
);
5714 return kctx_type_id
;
5717 BTF_ID_LIST(bpf_ctx_convert_btf_id
)
5718 BTF_ID(struct, bpf_ctx_convert
)
5720 struct btf
*btf_parse_vmlinux(void)
5722 struct btf_verifier_env
*env
= NULL
;
5723 struct bpf_verifier_log
*log
;
5724 struct btf
*btf
= NULL
;
5727 env
= kzalloc(sizeof(*env
), GFP_KERNEL
| __GFP_NOWARN
);
5729 return ERR_PTR(-ENOMEM
);
5732 log
->level
= BPF_LOG_KERNEL
;
5734 btf
= kzalloc(sizeof(*btf
), GFP_KERNEL
| __GFP_NOWARN
);
5741 btf
->data
= __start_BTF
;
5742 btf
->data_size
= __stop_BTF
- __start_BTF
;
5743 btf
->kernel_btf
= true;
5744 snprintf(btf
->name
, sizeof(btf
->name
), "vmlinux");
5746 err
= btf_parse_hdr(env
);
5750 btf
->nohdr_data
= btf
->data
+ btf
->hdr
.hdr_len
;
5752 err
= btf_parse_str_sec(env
);
5756 err
= btf_check_all_metas(env
);
5760 err
= btf_check_type_tags(env
, btf
, 1);
5764 /* btf_parse_vmlinux() runs under bpf_verifier_lock */
5765 bpf_ctx_convert
.t
= btf_type_by_id(btf
, bpf_ctx_convert_btf_id
[0]);
5767 bpf_struct_ops_init(btf
, log
);
5769 refcount_set(&btf
->refcnt
, 1);
5771 err
= btf_alloc_id(btf
);
5775 btf_verifier_env_free(env
);
5779 btf_verifier_env_free(env
);
5784 return ERR_PTR(err
);
5787 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
5789 static struct btf
*btf_parse_module(const char *module_name
, const void *data
, unsigned int data_size
)
5791 struct btf_verifier_env
*env
= NULL
;
5792 struct bpf_verifier_log
*log
;
5793 struct btf
*btf
= NULL
, *base_btf
;
5796 base_btf
= bpf_get_btf_vmlinux();
5797 if (IS_ERR(base_btf
))
5800 return ERR_PTR(-EINVAL
);
5802 env
= kzalloc(sizeof(*env
), GFP_KERNEL
| __GFP_NOWARN
);
5804 return ERR_PTR(-ENOMEM
);
5807 log
->level
= BPF_LOG_KERNEL
;
5809 btf
= kzalloc(sizeof(*btf
), GFP_KERNEL
| __GFP_NOWARN
);
5816 btf
->base_btf
= base_btf
;
5817 btf
->start_id
= base_btf
->nr_types
;
5818 btf
->start_str_off
= base_btf
->hdr
.str_len
;
5819 btf
->kernel_btf
= true;
5820 snprintf(btf
->name
, sizeof(btf
->name
), "%s", module_name
);
5822 btf
->data
= kvmalloc(data_size
, GFP_KERNEL
| __GFP_NOWARN
);
5827 memcpy(btf
->data
, data
, data_size
);
5828 btf
->data_size
= data_size
;
5830 err
= btf_parse_hdr(env
);
5834 btf
->nohdr_data
= btf
->data
+ btf
->hdr
.hdr_len
;
5836 err
= btf_parse_str_sec(env
);
5840 err
= btf_check_all_metas(env
);
5844 err
= btf_check_type_tags(env
, btf
, btf_nr_types(base_btf
));
5848 btf_verifier_env_free(env
);
5849 refcount_set(&btf
->refcnt
, 1);
5853 btf_verifier_env_free(env
);
5859 return ERR_PTR(err
);
5862 #endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
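/* Pick the BTF that a program's attach BTF ids refer to: the destination
 * program's BTF when extending/attaching to another BPF program, otherwise
 * the BTF object the program was loaded against (vmlinux or module BTF).
 */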
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
{
	struct bpf_prog *tgt_prog = prog->aux->dst_prog;

	if (tgt_prog)
		return tgt_prog->aux->btf;
	else
		return prog->aux->attach_btf;
}
static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
{
	/* skip modifiers */
	t = btf_type_skip_modifiers(btf, t->type, NULL);

	return btf_type_is_int(t);
}
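/* Map a byte offset into the raw tracing context (an array of 8-byte slots)
 * back to the index of the argument it belongs to. Pointers occupy one slot
 * and scalars are rounded up to 8 bytes, so e.g. for foo(int a, long b,
 * void *c) offsets 0-7 resolve to arg 0, 8-15 to arg 1 and 16-23 to arg 2;
 * one slot past the last argument addresses the return value.
 */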
static u32 get_ctx_arg_idx(struct btf *btf, const struct btf_type *func_proto,
			   int off)
{
	const struct btf_param *args;
	const struct btf_type *t;
	u32 offset = 0, nr_args;
	int i;

	if (!func_proto)
		return off / 8;

	nr_args = btf_type_vlen(func_proto);
	args = (const struct btf_param *)(func_proto + 1);
	for (i = 0; i < nr_args; i++) {
		t = btf_type_skip_modifiers(btf, args[i].type, NULL);
		offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
		if (off < offset)
			return i;
	}

	t = btf_type_skip_modifiers(btf, func_proto->type, NULL);
	offset += btf_type_is_ptr(t) ? 8 : roundup(t->size, 8);
	if (off < offset)
		return nr_args;

	return nr_args + 1;
}
static bool prog_args_trusted(const struct bpf_prog *prog)
{
	enum bpf_attach_type atype = prog->expected_attach_type;

	switch (prog->type) {
	case BPF_PROG_TYPE_TRACING:
		return atype == BPF_TRACE_RAW_TP || atype == BPF_TRACE_ITER;
	case BPF_PROG_TYPE_LSM:
		return bpf_lsm_is_trusted(prog);
	case BPF_PROG_TYPE_STRUCT_OPS:
		return true;
	default:
		return false;
	}
}
5926 bool btf_ctx_access(int off
, int size
, enum bpf_access_type type
,
5927 const struct bpf_prog
*prog
,
5928 struct bpf_insn_access_aux
*info
)
5930 const struct btf_type
*t
= prog
->aux
->attach_func_proto
;
5931 struct bpf_prog
*tgt_prog
= prog
->aux
->dst_prog
;
5932 struct btf
*btf
= bpf_prog_get_target_btf(prog
);
5933 const char *tname
= prog
->aux
->attach_func_name
;
5934 struct bpf_verifier_log
*log
= info
->log
;
5935 const struct btf_param
*args
;
5936 const char *tag_value
;
5941 bpf_log(log
, "func '%s' offset %d is not multiple of 8\n",
5945 arg
= get_ctx_arg_idx(btf
, t
, off
);
5946 args
= (const struct btf_param
*)(t
+ 1);
5947 /* if (t == NULL) Fall back to default BPF prog with
5948 * MAX_BPF_FUNC_REG_ARGS u64 arguments.
5950 nr_args
= t
? btf_type_vlen(t
) : MAX_BPF_FUNC_REG_ARGS
;
5951 if (prog
->aux
->attach_btf_trace
) {
5952 /* skip first 'void *__data' argument in btf_trace_##name typedef */
5957 if (arg
> nr_args
) {
5958 bpf_log(log
, "func '%s' doesn't have %d-th argument\n",
5963 if (arg
== nr_args
) {
5964 switch (prog
->expected_attach_type
) {
5965 case BPF_LSM_CGROUP
:
5967 case BPF_TRACE_FEXIT
:
5968 /* When LSM programs are attached to void LSM hooks
5969 * they use FEXIT trampolines and when attached to
5970 * int LSM hooks, they use MODIFY_RETURN trampolines.
5972 * While the LSM programs are BPF_MODIFY_RETURN-like
5975 * if (ret_type != 'int')
5978 * is _not_ done here. This is still safe as LSM hooks
5979 * have only void and int return types.
5983 t
= btf_type_by_id(btf
, t
->type
);
5985 case BPF_MODIFY_RETURN
:
5986 /* For now the BPF_MODIFY_RETURN can only be attached to
5987 * functions that return an int.
5992 t
= btf_type_skip_modifiers(btf
, t
->type
, NULL
);
5993 if (!btf_type_is_small_int(t
)) {
5995 "ret type %s not allowed for fmod_ret\n",
6001 bpf_log(log
, "func '%s' doesn't have %d-th argument\n",
6007 /* Default prog with MAX_BPF_FUNC_REG_ARGS args */
6009 t
= btf_type_by_id(btf
, args
[arg
].type
);
6012 /* skip modifiers */
6013 while (btf_type_is_modifier(t
))
6014 t
= btf_type_by_id(btf
, t
->type
);
6015 if (btf_type_is_small_int(t
) || btf_is_any_enum(t
) || __btf_type_is_struct(t
))
6016 /* accessing a scalar */
6018 if (!btf_type_is_ptr(t
)) {
6020 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
6022 __btf_name_by_offset(btf
, t
->name_off
),
6027 /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */
6028 for (i
= 0; i
< prog
->aux
->ctx_arg_info_size
; i
++) {
6029 const struct bpf_ctx_arg_aux
*ctx_arg_info
= &prog
->aux
->ctx_arg_info
[i
];
6032 type
= base_type(ctx_arg_info
->reg_type
);
6033 flag
= type_flag(ctx_arg_info
->reg_type
);
6034 if (ctx_arg_info
->offset
== off
&& type
== PTR_TO_BUF
&&
6035 (flag
& PTR_MAYBE_NULL
)) {
6036 info
->reg_type
= ctx_arg_info
->reg_type
;
6042 /* This is a pointer to void.
6043 * It is the same as scalar from the verifier safety pov.
6044 * No further pointer walking is allowed.
6048 if (is_int_ptr(btf
, t
))
6051 /* this is a pointer to another type */
6052 for (i
= 0; i
< prog
->aux
->ctx_arg_info_size
; i
++) {
6053 const struct bpf_ctx_arg_aux
*ctx_arg_info
= &prog
->aux
->ctx_arg_info
[i
];
6055 if (ctx_arg_info
->offset
== off
) {
6056 if (!ctx_arg_info
->btf_id
) {
6057 bpf_log(log
,"invalid btf_id for context argument offset %u\n", off
);
6061 info
->reg_type
= ctx_arg_info
->reg_type
;
6062 info
->btf
= btf_vmlinux
;
6063 info
->btf_id
= ctx_arg_info
->btf_id
;
6068 info
->reg_type
= PTR_TO_BTF_ID
;
6069 if (prog_args_trusted(prog
))
6070 info
->reg_type
|= PTR_TRUSTED
;
6073 enum bpf_prog_type tgt_type
;
6075 if (tgt_prog
->type
== BPF_PROG_TYPE_EXT
)
6076 tgt_type
= tgt_prog
->aux
->saved_dst_prog_type
;
6078 tgt_type
= tgt_prog
->type
;
6080 ret
= btf_translate_to_vmlinux(log
, btf
, t
, tgt_type
, arg
);
6082 info
->btf
= btf_vmlinux
;
6091 info
->btf_id
= t
->type
;
6092 t
= btf_type_by_id(btf
, t
->type
);
6094 if (btf_type_is_type_tag(t
)) {
6095 tag_value
= __btf_name_by_offset(btf
, t
->name_off
);
6096 if (strcmp(tag_value
, "user") == 0)
6097 info
->reg_type
|= MEM_USER
;
6098 if (strcmp(tag_value
, "percpu") == 0)
6099 info
->reg_type
|= MEM_PERCPU
;
6102 /* skip modifiers */
6103 while (btf_type_is_modifier(t
)) {
6104 info
->btf_id
= t
->type
;
6105 t
= btf_type_by_id(btf
, t
->type
);
6107 if (!btf_type_is_struct(t
)) {
6109 "func '%s' arg%d type %s is not a struct\n",
6110 tname
, arg
, btf_type_str(t
));
6113 bpf_log(log
, "func '%s' arg%d has btf_id %d type %s '%s'\n",
6114 tname
, arg
, info
->btf_id
, btf_type_str(t
),
6115 __btf_name_by_offset(btf
, t
->name_off
));
6119 enum bpf_struct_walk_result
{
6126 static int btf_struct_walk(struct bpf_verifier_log
*log
, const struct btf
*btf
,
6127 const struct btf_type
*t
, int off
, int size
,
6128 u32
*next_btf_id
, enum bpf_type_flag
*flag
,
6129 const char **field_name
)
6131 u32 i
, moff
, mtrue_end
, msize
= 0, total_nelems
= 0;
6132 const struct btf_type
*mtype
, *elem_type
= NULL
;
6133 const struct btf_member
*member
;
6134 const char *tname
, *mname
, *tag_value
;
6135 u32 vlen
, elem_id
, mid
;
6138 if (btf_type_is_modifier(t
))
6139 t
= btf_type_skip_modifiers(btf
, t
->type
, NULL
);
6140 tname
= __btf_name_by_offset(btf
, t
->name_off
);
6141 if (!btf_type_is_struct(t
)) {
6142 bpf_log(log
, "Type '%s' is not a struct\n", tname
);
6146 vlen
= btf_type_vlen(t
);
6147 if (BTF_INFO_KIND(t
->info
) == BTF_KIND_UNION
&& vlen
!= 1 && !(*flag
& PTR_UNTRUSTED
))
6149 * walking unions yields untrusted pointers
6150 * with exception of __bpf_md_ptr and other
6151 * unions with a single member
6153 *flag
|= PTR_UNTRUSTED
;
6155 if (off
+ size
> t
->size
) {
6156 /* If the last element is a variable size array, we may
6157 * need to relax the rule.
6159 struct btf_array
*array_elem
;
6164 member
= btf_type_member(t
) + vlen
- 1;
6165 mtype
= btf_type_skip_modifiers(btf
, member
->type
,
6167 if (!btf_type_is_array(mtype
))
6170 array_elem
= (struct btf_array
*)(mtype
+ 1);
6171 if (array_elem
->nelems
!= 0)
6174 moff
= __btf_member_bit_offset(t
, member
) / 8;
6178 /* allow structure and integer */
6179 t
= btf_type_skip_modifiers(btf
, array_elem
->type
,
6182 if (btf_type_is_int(t
))
6185 if (!btf_type_is_struct(t
))
6188 off
= (off
- moff
) % t
->size
;
6192 bpf_log(log
, "access beyond struct %s at off %u size %u\n",
6197 for_each_member(i
, t
, member
) {
6198 /* offset of the field in bytes */
6199 moff
= __btf_member_bit_offset(t
, member
) / 8;
6200 if (off
+ size
<= moff
)
6201 /* won't find anything, field is already too far */
6204 if (__btf_member_bitfield_size(t
, member
)) {
6205 u32 end_bit
= __btf_member_bit_offset(t
, member
) +
6206 __btf_member_bitfield_size(t
, member
);
6208 /* off <= moff instead of off == moff because clang
6209 * does not generate a BTF member for anonymous
6210 * bitfield like the ":16" here:
6217 BITS_ROUNDUP_BYTES(end_bit
) <= off
+ size
)
6220 /* off may be accessing a following member
6224 * Doing partial access at either end of this
6225 * bitfield. Continue on this case also to
6226 * treat it as not accessing this bitfield
6227 * and eventually error out as field not
6228 * found to keep it simple.
6229 * It could be relaxed if there was a legit
6230 * partial access case later.
6235 /* In case of "off" is pointing to holes of a struct */
6239 /* type of the field */
6241 mtype
= btf_type_by_id(btf
, member
->type
);
6242 mname
= __btf_name_by_offset(btf
, member
->name_off
);
6244 mtype
= __btf_resolve_size(btf
, mtype
, &msize
,
6245 &elem_type
, &elem_id
, &total_nelems
,
6247 if (IS_ERR(mtype
)) {
6248 bpf_log(log
, "field %s doesn't have size\n", mname
);
6252 mtrue_end
= moff
+ msize
;
6253 if (off
>= mtrue_end
)
6254 /* no overlap with member, keep iterating */
6257 if (btf_type_is_array(mtype
)) {
6260 /* __btf_resolve_size() above helps to
6261 * linearize a multi-dimensional array.
6263 * The logic here is treating an array
6264 * in a struct as the following way:
6267 * struct inner array[2][2];
6273 * struct inner array_elem0;
6274 * struct inner array_elem1;
6275 * struct inner array_elem2;
6276 * struct inner array_elem3;
6279 * When accessing outer->array[1][0], it moves
6280 * moff to "array_elem2", set mtype to
6281 * "struct inner", and msize also becomes
6282 * sizeof(struct inner). Then most of the
6283 * remaining logic will fall through without
6284 * caring the current member is an array or
6287 * Unlike mtype/msize/moff, mtrue_end does not
6288 * change. The naming difference ("_true") tells
6289 * that it is not always corresponding to
6290 * the current mtype/msize/moff.
6291 * It is the true end of the current
6292 * member (i.e. array in this case). That
6293 * will allow an int array to be accessed like
6295 * i.e. allow access beyond the size of
6296 * the array's element as long as it is
6297 * within the mtrue_end boundary.
6300 /* skip empty array */
6301 if (moff
== mtrue_end
)
6304 msize
/= total_nelems
;
6305 elem_idx
= (off
- moff
) / msize
;
6306 moff
+= elem_idx
* msize
;
6311 /* the 'off' we're looking for is either equal to start
6312 * of this field or inside of this struct
6314 if (btf_type_is_struct(mtype
)) {
6315 /* our field must be inside that union or struct */
6318 /* return if the offset matches the member offset */
6324 /* adjust offset we're looking for */
6329 if (btf_type_is_ptr(mtype
)) {
6330 const struct btf_type
*stype
, *t
;
6331 enum bpf_type_flag tmp_flag
= 0;
6334 if (msize
!= size
|| off
!= moff
) {
6336 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
6337 mname
, moff
, tname
, off
, size
);
6341 /* check type tag */
6342 t
= btf_type_by_id(btf
, mtype
->type
);
6343 if (btf_type_is_type_tag(t
)) {
6344 tag_value
= __btf_name_by_offset(btf
, t
->name_off
);
6345 /* check __user tag */
6346 if (strcmp(tag_value
, "user") == 0)
6347 tmp_flag
= MEM_USER
;
6348 /* check __percpu tag */
6349 if (strcmp(tag_value
, "percpu") == 0)
6350 tmp_flag
= MEM_PERCPU
;
6351 /* check __rcu tag */
6352 if (strcmp(tag_value
, "rcu") == 0)
6356 stype
= btf_type_skip_modifiers(btf
, mtype
->type
, &id
);
6357 if (btf_type_is_struct(stype
)) {
6361 *field_name
= mname
;
6366 /* Allow more flexible access within an int as long as
6367 * it is within mtrue_end.
6368 * Since mtrue_end could be the end of an array,
6369 * that also allows using an array of int as a scratch
6370 * space. e.g. skb->cb[].
6372 if (off
+ size
> mtrue_end
&& !(*flag
& PTR_UNTRUSTED
)) {
6374 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
6375 mname
, mtrue_end
, tname
, off
, size
);
6381 bpf_log(log
, "struct %s doesn't have field at offset %d\n", tname
, off
);
6385 int btf_struct_access(struct bpf_verifier_log
*log
,
6386 const struct bpf_reg_state
*reg
,
6387 int off
, int size
, enum bpf_access_type atype __maybe_unused
,
6388 u32
*next_btf_id
, enum bpf_type_flag
*flag
,
6389 const char **field_name
)
6391 const struct btf
*btf
= reg
->btf
;
6392 enum bpf_type_flag tmp_flag
= 0;
6393 const struct btf_type
*t
;
6394 u32 id
= reg
->btf_id
;
6397 while (type_is_alloc(reg
->type
)) {
6398 struct btf_struct_meta
*meta
;
6399 struct btf_record
*rec
;
6402 meta
= btf_find_struct_meta(btf
, id
);
6406 for (i
= 0; i
< rec
->cnt
; i
++) {
6407 struct btf_field
*field
= &rec
->fields
[i
];
6408 u32 offset
= field
->offset
;
6409 if (off
< offset
+ btf_field_type_size(field
->type
) && offset
< off
+ size
) {
6411 "direct access to %s is disallowed\n",
6412 btf_field_type_name(field
->type
));
6419 t
= btf_type_by_id(btf
, id
);
6421 err
= btf_struct_walk(log
, btf
, t
, off
, size
, &id
, &tmp_flag
, field_name
);
6425 /* For local types, the destination register cannot
6426 * become a pointer again.
6428 if (type_is_alloc(reg
->type
))
6429 return SCALAR_VALUE
;
6430 /* If we found the pointer or scalar on t+off,
6435 return PTR_TO_BTF_ID
;
6437 return SCALAR_VALUE
;
6439 /* We found nested struct, so continue the search
6440 * by diving in it. At this point the offset is
6441 * aligned with the new type, so set it to 0.
6443 t
= btf_type_by_id(btf
, id
);
6447 /* It's either error or unknown return value..
6450 if (WARN_ONCE(err
> 0, "unknown btf_struct_walk return value"))
/* Check that two BTF types, each specified as a BTF object + id, are exactly
 * the same. Trivial ID check is not enough due to module BTFs, because we can
 * end up with two different module BTFs, but IDs point to the common type in
 * vmlinux BTF.
 */
bool btf_types_are_same(const struct btf *btf1, u32 id1,
			const struct btf *btf2, u32 id2)
{
	if (id1 != id2)
		return false;
	if (btf1 == btf2)
		return true;
	return btf_type_by_id(btf1, id1) == btf_type_by_id(btf2, id2);
}
6474 bool btf_struct_ids_match(struct bpf_verifier_log
*log
,
6475 const struct btf
*btf
, u32 id
, int off
,
6476 const struct btf
*need_btf
, u32 need_type_id
,
6479 const struct btf_type
*type
;
6480 enum bpf_type_flag flag
= 0;
6483 /* Are we already done? */
6484 if (off
== 0 && btf_types_are_same(btf
, id
, need_btf
, need_type_id
))
6486 /* In case of strict type match, we do not walk struct, the top level
6487 * type match must succeed. When strict is true, off should have already
6493 type
= btf_type_by_id(btf
, id
);
6496 err
= btf_struct_walk(log
, btf
, type
, off
, 1, &id
, &flag
, NULL
);
6497 if (err
!= WALK_STRUCT
)
6500 /* We found nested struct object. If it matches
6501 * the requested ID, we're done. Otherwise let's
6502 * continue the search with offset 0 in the new
6505 if (!btf_types_are_same(btf
, id
, need_btf
, need_type_id
)) {
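/* Resolve a BTF type id to the size the kernel calling convention cares
 * about when building a function model: 0 for void, sizeof(void *) for
 * pointers, t->size for ints, enums and structs, -EINVAL for anything else.
 */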
static int __get_type_size(struct btf *btf, u32 btf_id,
			   const struct btf_type **ret_type)
{
	const struct btf_type *t;

	*ret_type = btf_type_by_id(btf, 0);
	if (!btf_id)
		/* void */
		return 0;
	t = btf_type_by_id(btf, btf_id);
	while (t && btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!t)
		return -EINVAL;
	*ret_type = t;
	if (btf_type_is_ptr(t))
		/* kernel size of pointer. Not BPF's size of pointer*/
		return sizeof(void *);
	if (btf_type_is_int(t) || btf_is_any_enum(t) || __btf_type_is_struct(t))
		return t->size;
	return -EINVAL;
}

static u8 __get_type_fmodel_flags(const struct btf_type *t)
{
	u8 flags = 0;

	if (__btf_type_is_struct(t))
		flags |= BTF_FMODEL_STRUCT_ARG;
	if (btf_type_is_signed_int(t))
		flags |= BTF_FMODEL_SIGNED_ARG;

	return flags;
}
6548 int btf_distill_func_proto(struct bpf_verifier_log
*log
,
6550 const struct btf_type
*func
,
6552 struct btf_func_model
*m
)
6554 const struct btf_param
*args
;
6555 const struct btf_type
*t
;
6560 /* BTF function prototype doesn't match the verifier types.
6561 * Fall back to MAX_BPF_FUNC_REG_ARGS u64 args.
6563 for (i
= 0; i
< MAX_BPF_FUNC_REG_ARGS
; i
++) {
6565 m
->arg_flags
[i
] = 0;
6569 m
->nr_args
= MAX_BPF_FUNC_REG_ARGS
;
6572 args
= (const struct btf_param
*)(func
+ 1);
6573 nargs
= btf_type_vlen(func
);
6574 if (nargs
> MAX_BPF_FUNC_ARGS
) {
6576 "The function %s has %d arguments. Too many.\n",
6580 ret
= __get_type_size(btf
, func
->type
, &t
);
6581 if (ret
< 0 || __btf_type_is_struct(t
)) {
6583 "The function %s return type %s is unsupported.\n",
6584 tname
, btf_type_str(t
));
6588 m
->ret_flags
= __get_type_fmodel_flags(t
);
6590 for (i
= 0; i
< nargs
; i
++) {
6591 if (i
== nargs
- 1 && args
[i
].type
== 0) {
6593 "The function %s with variable args is unsupported.\n",
6597 ret
= __get_type_size(btf
, args
[i
].type
, &t
);
6599 /* No support of struct argument size greater than 16 bytes */
6600 if (ret
< 0 || ret
> 16) {
6602 "The function %s arg%d type %s is unsupported.\n",
6603 tname
, i
, btf_type_str(t
));
6608 "The function %s has malformed void argument.\n",
6612 m
->arg_size
[i
] = ret
;
6613 m
->arg_flags
[i
] = __get_type_fmodel_flags(t
);
6619 /* Compare BTFs of two functions assuming only scalars and pointers to context.
6620 * t1 points to BTF_KIND_FUNC in btf1
6621 * t2 points to BTF_KIND_FUNC in btf2
6623 * EINVAL - function prototype mismatch
6624 * EFAULT - verifier bug
6625 * 0 - 99% match. The last 1% is validated by the verifier.
6627 static int btf_check_func_type_match(struct bpf_verifier_log
*log
,
6628 struct btf
*btf1
, const struct btf_type
*t1
,
6629 struct btf
*btf2
, const struct btf_type
*t2
)
6631 const struct btf_param
*args1
, *args2
;
6632 const char *fn1
, *fn2
, *s1
, *s2
;
6633 u32 nargs1
, nargs2
, i
;
6635 fn1
= btf_name_by_offset(btf1
, t1
->name_off
);
6636 fn2
= btf_name_by_offset(btf2
, t2
->name_off
);
6638 if (btf_func_linkage(t1
) != BTF_FUNC_GLOBAL
) {
6639 bpf_log(log
, "%s() is not a global function\n", fn1
);
6642 if (btf_func_linkage(t2
) != BTF_FUNC_GLOBAL
) {
6643 bpf_log(log
, "%s() is not a global function\n", fn2
);
6647 t1
= btf_type_by_id(btf1
, t1
->type
);
6648 if (!t1
|| !btf_type_is_func_proto(t1
))
6650 t2
= btf_type_by_id(btf2
, t2
->type
);
6651 if (!t2
|| !btf_type_is_func_proto(t2
))
6654 args1
= (const struct btf_param
*)(t1
+ 1);
6655 nargs1
= btf_type_vlen(t1
);
6656 args2
= (const struct btf_param
*)(t2
+ 1);
6657 nargs2
= btf_type_vlen(t2
);
6659 if (nargs1
!= nargs2
) {
6660 bpf_log(log
, "%s() has %d args while %s() has %d args\n",
6661 fn1
, nargs1
, fn2
, nargs2
);
6665 t1
= btf_type_skip_modifiers(btf1
, t1
->type
, NULL
);
6666 t2
= btf_type_skip_modifiers(btf2
, t2
->type
, NULL
);
6667 if (t1
->info
!= t2
->info
) {
6669 "Return type %s of %s() doesn't match type %s of %s()\n",
6670 btf_type_str(t1
), fn1
,
6671 btf_type_str(t2
), fn2
);
6675 for (i
= 0; i
< nargs1
; i
++) {
6676 t1
= btf_type_skip_modifiers(btf1
, args1
[i
].type
, NULL
);
6677 t2
= btf_type_skip_modifiers(btf2
, args2
[i
].type
, NULL
);
6679 if (t1
->info
!= t2
->info
) {
6680 bpf_log(log
, "arg%d in %s() is %s while %s() has %s\n",
6681 i
, fn1
, btf_type_str(t1
),
6682 fn2
, btf_type_str(t2
));
6685 if (btf_type_has_size(t1
) && t1
->size
!= t2
->size
) {
6687 "arg%d in %s() has size %d while %s() has %d\n",
6693 /* global functions are validated with scalars and pointers
6694 * to context only. And only global functions can be replaced.
6695 * Hence type check only those types.
6697 if (btf_type_is_int(t1
) || btf_is_any_enum(t1
))
6699 if (!btf_type_is_ptr(t1
)) {
6701 "arg%d in %s() has unrecognized type\n",
6705 t1
= btf_type_skip_modifiers(btf1
, t1
->type
, NULL
);
6706 t2
= btf_type_skip_modifiers(btf2
, t2
->type
, NULL
);
6707 if (!btf_type_is_struct(t1
)) {
6709 "arg%d in %s() is not a pointer to context\n",
6713 if (!btf_type_is_struct(t2
)) {
6715 "arg%d in %s() is not a pointer to context\n",
6719 /* This is an optional check to make program writing easier.
6720 * Compare names of structs and report an error to the user.
6721 * btf_prepare_func_args() already checked that t2 struct
6722 * is a context type. btf_prepare_func_args() will check
6723 * later that t1 struct is a context type as well.
6725 s1
= btf_name_by_offset(btf1
, t1
->name_off
);
6726 s2
= btf_name_by_offset(btf2
, t2
->name_off
);
6727 if (strcmp(s1
, s2
)) {
6729 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
6730 i
, fn1
, s1
, fn2
, s2
);
6737 /* Compare BTFs of given program with BTF of target program */
6738 int btf_check_type_match(struct bpf_verifier_log
*log
, const struct bpf_prog
*prog
,
6739 struct btf
*btf2
, const struct btf_type
*t2
)
6741 struct btf
*btf1
= prog
->aux
->btf
;
6742 const struct btf_type
*t1
;
6745 if (!prog
->aux
->func_info
) {
6746 bpf_log(log
, "Program extension requires BTF\n");
6750 btf_id
= prog
->aux
->func_info
[0].type_id
;
6754 t1
= btf_type_by_id(btf1
, btf_id
);
6755 if (!t1
|| !btf_type_is_func(t1
))
6758 return btf_check_func_type_match(log
, btf1
, t1
, btf2
, t2
);
6761 static int btf_check_func_arg_match(struct bpf_verifier_env
*env
,
6762 const struct btf
*btf
, u32 func_id
,
6763 struct bpf_reg_state
*regs
,
6765 bool processing_call
)
6767 enum bpf_prog_type prog_type
= resolve_prog_type(env
->prog
);
6768 struct bpf_verifier_log
*log
= &env
->log
;
6769 const char *func_name
, *ref_tname
;
6770 const struct btf_type
*t
, *ref_t
;
6771 const struct btf_param
*args
;
6772 u32 i
, nargs
, ref_id
;
6775 t
= btf_type_by_id(btf
, func_id
);
6776 if (!t
|| !btf_type_is_func(t
)) {
6777 /* These checks were already done by the verifier while loading
6778 * struct bpf_func_info or in add_kfunc_call().
6780 bpf_log(log
, "BTF of func_id %u doesn't point to KIND_FUNC\n",
6784 func_name
= btf_name_by_offset(btf
, t
->name_off
);
6786 t
= btf_type_by_id(btf
, t
->type
);
6787 if (!t
|| !btf_type_is_func_proto(t
)) {
6788 bpf_log(log
, "Invalid BTF of func %s\n", func_name
);
6791 args
= (const struct btf_param
*)(t
+ 1);
6792 nargs
= btf_type_vlen(t
);
6793 if (nargs
> MAX_BPF_FUNC_REG_ARGS
) {
6794 bpf_log(log
, "Function %s has %d > %d args\n", func_name
, nargs
,
6795 MAX_BPF_FUNC_REG_ARGS
);
6799 /* check that BTF function arguments match actual types that the
6802 for (i
= 0; i
< nargs
; i
++) {
6803 enum bpf_arg_type arg_type
= ARG_DONTCARE
;
6805 struct bpf_reg_state
*reg
= ®s
[regno
];
6807 t
= btf_type_skip_modifiers(btf
, args
[i
].type
, NULL
);
6808 if (btf_type_is_scalar(t
)) {
6809 if (reg
->type
== SCALAR_VALUE
)
6811 bpf_log(log
, "R%d is not a scalar\n", regno
);
6815 if (!btf_type_is_ptr(t
)) {
6816 bpf_log(log
, "Unrecognized arg#%d type %s\n",
6817 i
, btf_type_str(t
));
6821 ref_t
= btf_type_skip_modifiers(btf
, t
->type
, &ref_id
);
6822 ref_tname
= btf_name_by_offset(btf
, ref_t
->name_off
);
6824 ret
= check_func_arg_reg_off(env
, reg
, regno
, arg_type
);
6828 if (btf_get_prog_ctx_type(log
, btf
, t
, prog_type
, i
)) {
6829 /* If function expects ctx type in BTF check that caller
6830 * is passing PTR_TO_CTX.
6832 if (reg
->type
!= PTR_TO_CTX
) {
6834 "arg#%d expected pointer to ctx, but got %s\n",
6835 i
, btf_type_str(t
));
6838 } else if (ptr_to_mem_ok
&& processing_call
) {
6839 const struct btf_type
*resolve_ret
;
6842 resolve_ret
= btf_resolve_size(btf
, ref_t
, &type_size
);
6843 if (IS_ERR(resolve_ret
)) {
6845 "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
6846 i
, btf_type_str(ref_t
), ref_tname
,
6847 PTR_ERR(resolve_ret
));
6851 if (check_mem_reg(env
, reg
, regno
, type_size
))
6854 bpf_log(log
, "reg type unsupported for arg#%d function %s#%d\n", i
,
6855 func_name
, func_id
);
6863 /* Compare BTF of a function declaration with given bpf_reg_state.
6865 * EFAULT - there is a verifier bug. Abort verification.
6866 * EINVAL - there is a type mismatch or BTF is not available.
6867 * 0 - BTF matches with what bpf_reg_state expects.
6868 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6870 int btf_check_subprog_arg_match(struct bpf_verifier_env
*env
, int subprog
,
6871 struct bpf_reg_state
*regs
)
6873 struct bpf_prog
*prog
= env
->prog
;
6874 struct btf
*btf
= prog
->aux
->btf
;
6879 if (!prog
->aux
->func_info
)
6882 btf_id
= prog
->aux
->func_info
[subprog
].type_id
;
6886 if (prog
->aux
->func_info_aux
[subprog
].unreliable
)
6889 is_global
= prog
->aux
->func_info_aux
[subprog
].linkage
== BTF_FUNC_GLOBAL
;
6890 err
= btf_check_func_arg_match(env
, btf
, btf_id
, regs
, is_global
, false);
6892 /* Compiler optimizations can remove arguments from static functions
6893 * or mismatched type can be passed into a global function.
6894 * In such cases mark the function as unreliable from BTF point of view.
6897 prog
->aux
->func_info_aux
[subprog
].unreliable
= true;
6901 /* Compare BTF of a function call with given bpf_reg_state.
6903 * EFAULT - there is a verifier bug. Abort verification.
6904 * EINVAL - there is a type mismatch or BTF is not available.
6905 * 0 - BTF matches with what bpf_reg_state expects.
6906 * Only PTR_TO_CTX and SCALAR_VALUE states are recognized.
6908 * NOTE: the code is duplicated from btf_check_subprog_arg_match()
6909 * because btf_check_func_arg_match() is still doing both. Once that
6910 * function is split in 2, we can call from here btf_check_subprog_arg_match()
6911 * first, and then treat the calling part in a new code path.
6913 int btf_check_subprog_call(struct bpf_verifier_env
*env
, int subprog
,
6914 struct bpf_reg_state
*regs
)
6916 struct bpf_prog
*prog
= env
->prog
;
6917 struct btf
*btf
= prog
->aux
->btf
;
6922 if (!prog
->aux
->func_info
)
6925 btf_id
= prog
->aux
->func_info
[subprog
].type_id
;
6929 if (prog
->aux
->func_info_aux
[subprog
].unreliable
)
6932 is_global
= prog
->aux
->func_info_aux
[subprog
].linkage
== BTF_FUNC_GLOBAL
;
6933 err
= btf_check_func_arg_match(env
, btf
, btf_id
, regs
, is_global
, true);
6935 /* Compiler optimizations can remove arguments from static functions
6936 * or mismatched type can be passed into a global function.
6937 * In such cases mark the function as unreliable from BTF point of view.
6940 prog
->aux
->func_info_aux
[subprog
].unreliable
= true;
/* Convert BTF of a function into bpf_reg_state if possible
 * Returns:
 * EFAULT - there is a verifier bug. Abort verification.
 * EINVAL - cannot convert BTF.
 * 0 - Successfully converted BTF into bpf_reg_state
 * (either PTR_TO_CTX or SCALAR_VALUE).
 */
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *regs)
{
	struct bpf_verifier_log *log = &env->log;
	struct bpf_prog *prog = env->prog;
	enum bpf_prog_type prog_type = prog->type;
	struct btf *btf = prog->aux->btf;
	const struct btf_param *args;
	const struct btf_type *t, *ref_t;
	u32 i, nargs, btf_id;
	const char *tname;

	if (!prog->aux->func_info ||
	    prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
		bpf_log(log, "Verifier bug\n");
		return -EFAULT;
	}

	btf_id = prog->aux->func_info[subprog].type_id;
	if (!btf_id) {
		bpf_log(log, "Global functions need valid BTF\n");
		return -EFAULT;
	}

	t = btf_type_by_id(btf, btf_id);
	if (!t || !btf_type_is_func(t)) {
		/* These checks were already done by the verifier while loading
		 * struct bpf_func_info
		 */
		bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
			subprog);
		return -EFAULT;
	}
	tname = btf_name_by_offset(btf, t->name_off);

	if (log->level & BPF_LOG_LEVEL)
		bpf_log(log, "Validating %s() func#%d...\n",
			tname, subprog);

	if (prog->aux->func_info_aux[subprog].unreliable) {
		bpf_log(log, "Verifier bug in function %s()\n", tname);
		return -EFAULT;
	}
	if (prog_type == BPF_PROG_TYPE_EXT)
		prog_type = prog->aux->dst_prog->type;

	t = btf_type_by_id(btf, t->type);
	if (!t || !btf_type_is_func_proto(t)) {
		bpf_log(log, "Invalid type of function %s()\n", tname);
		return -EFAULT;
	}
	args = (const struct btf_param *)(t + 1);
	nargs = btf_type_vlen(t);
	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
		bpf_log(log, "Global function %s() with %d > %d args. Buggy compiler.\n",
			tname, nargs, MAX_BPF_FUNC_REG_ARGS);
		return -EINVAL;
	}
	/* check that function returns int */
	t = btf_type_by_id(btf, t->type);
	while (btf_type_is_modifier(t))
		t = btf_type_by_id(btf, t->type);
	if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
		bpf_log(log,
			"Global function %s() doesn't return scalar. Only those are supported.\n",
			tname);
		return -EINVAL;
	}
	/* Convert BTF function arguments into verifier types.
	 * Only PTR_TO_CTX and SCALAR are supported atm.
	 */
	for (i = 0; i < nargs; i++) {
		struct bpf_reg_state *reg = &regs[i + 1];

		t = btf_type_by_id(btf, args[i].type);
		while (btf_type_is_modifier(t))
			t = btf_type_by_id(btf, t->type);
		if (btf_type_is_int(t) || btf_is_any_enum(t)) {
			reg->type = SCALAR_VALUE;
			continue;
		}
		if (btf_type_is_ptr(t)) {
			if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
				reg->type = PTR_TO_CTX;
				continue;
			}

			t = btf_type_skip_modifiers(btf, t->type, NULL);

			ref_t = btf_resolve_size(btf, t, &reg->mem_size);
			if (IS_ERR(ref_t)) {
				bpf_log(log,
					"arg#%d reference type('%s %s') size cannot be determined: %ld\n",
					i, btf_type_str(t), btf_name_by_offset(btf, t->name_off),
					PTR_ERR(ref_t));
				return -EINVAL;
			}

			reg->type = PTR_TO_MEM | PTR_MAYBE_NULL;
			reg->id = ++env->id_gen;
			continue;
		}
		bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
			i, btf_type_str(t), tname);
		return -EINVAL;
	}
	return 0;
}
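
/* Illustration (not part of the original kernel sources): for a hypothetical
 * global subprog such as
 *
 *     __noinline int sum(int a, int b) { return a + b; }
 *
 * btf_prepare_func_args() walks the FUNC_PROTO of sum() and seeds regs[1] and
 * regs[2] with SCALAR_VALUE; an argument that is a pointer to the program's
 * context type would instead be seeded as PTR_TO_CTX, and a pointer to a
 * sized object becomes PTR_TO_MEM | PTR_MAYBE_NULL as in the loop above.
 */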
static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
			  struct btf_show *show)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	show->btf = btf;
	memset(&show->state, 0, sizeof(show->state));
	memset(&show->obj, 0, sizeof(show->obj));

	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
}
static void btf_seq_show(struct btf_show *show, const char *fmt,
			 va_list args)
{
	seq_vprintf((struct seq_file *)show->target, fmt, args);
}
int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
			    void *obj, struct seq_file *m, u64 flags)
{
	struct btf_show sseq;

	sseq.target = m;
	sseq.showfn = btf_seq_show;
	sseq.flags = flags;

	btf_type_show(btf, type_id, obj, &sseq);

	return sseq.state.status;
}
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	(void) btf_type_seq_show_flags(btf, type_id, obj, m,
				       BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
				       BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
}
struct btf_show_snprintf {
	struct btf_show show;
	int len_left;		/* space left in string */
	int len;		/* length we would have written */
};
static void btf_snprintf_show(struct btf_show *show, const char *fmt,
			      va_list args)
{
	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
	int len;

	len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);

	if (len < 0) {
		ssnprintf->len_left = 0;
		ssnprintf->len = len;
	} else if (len >= ssnprintf->len_left) {
		/* no space, drive on to get length we would have written */
		ssnprintf->len_left = 0;
		ssnprintf->len += len;
	} else {
		ssnprintf->len_left -= len;
		ssnprintf->len += len;
		show->target += len;
	}
}
int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
			   char *buf, int len, u64 flags)
{
	struct btf_show_snprintf ssnprintf;

	ssnprintf.show.target = buf;
	ssnprintf.show.flags = flags;
	ssnprintf.show.showfn = btf_snprintf_show;
	ssnprintf.len_left = len;
	ssnprintf.len = 0;

	btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);

	/* If we encountered an error, return it. */
	if (ssnprintf.show.state.status)
		return ssnprintf.show.state.status;

	/* Otherwise return length we would have written */
	return ssnprintf.len;
}
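
/* Usage sketch (hypothetical caller, not part of this file): a BTF-typed
 * object can be rendered into a fixed-size buffer and truncation detected the
 * same way snprintf() callers do:
 *
 *     char buf[256];
 *     int len = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf),
 *                                      BTF_SHOW_COMPACT);
 *     if (len >= sizeof(buf))
 *             ... output was truncated ...
 *
 * A negative return propagates show.state.status, i.e. an error seen while
 * walking the type.
 */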
#ifdef CONFIG_PROC_FS
static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct btf *btf = filp->private_data;

	seq_printf(m, "btf_id:\t%u\n", btf->id);
}
#endif

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_btf_show_fdinfo,
#endif
	.release	= btf_release,
};
static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}

int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(attr, uattr, uattr_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is published to the userspace.
	 * All BTF free must go through call_rcu() from
	 * now on (i.e. free by calling btf_put()).
	 */
	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}
struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}
int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info;
	u32 info_copy, btf_copy;
	void __user *ubtf;
	char __user *uname;
	u32 uinfo_len, uname_len, name_len;
	int ret = 0;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	memset(&info, 0, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	info.kernel_btf = btf->kernel_btf;

	uname = u64_to_user_ptr(info.name);
	uname_len = info.name_len;
	if (!uname ^ !uname_len)
		return -EINVAL;

	name_len = strlen(btf->name);
	info.name_len = name_len;

	if (uname) {
		if (uname_len >= name_len + 1) {
			if (copy_to_user(uname, btf->name, name_len + 1))
				return -EFAULT;
		} else {
			char zero = '\0';

			if (copy_to_user(uname, btf->name, uname_len - 1))
				return -EFAULT;
			if (put_user(zero, uname + uname_len - 1))
				return -EFAULT;
			/* let user-space know about too short buffer */
			ret = -ENOSPC;
		}
	}

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return ret;
}
int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}
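
/* Userspace reaches this through the bpf() syscall; an illustrative snippet
 * (not kernel code, field names per the UAPI bpf_attr layout):
 *
 *     union bpf_attr attr = { .btf_id = id };
 *     int fd = syscall(__NR_bpf, BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
 *
 * which is how tools such as bpftool obtain a BTF object FD from a BTF ID.
 */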
u32 btf_obj_id(const struct btf *btf)
{
	return btf->id;
}

bool btf_is_kernel(const struct btf *btf)
{
	return btf->kernel_btf;
}

bool btf_is_module(const struct btf *btf)
{
	return btf->kernel_btf && strcmp(btf->name, "vmlinux") != 0;
}
enum {
	BTF_MODULE_F_LIVE = (1 << 0),
};

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
struct btf_module {
	struct list_head list;
	struct module *module;
	struct btf *btf;
	struct bin_attribute *sysfs_attr;
	int flags;
};

static LIST_HEAD(btf_modules);
static DEFINE_MUTEX(btf_module_mutex);
static ssize_t
btf_module_read(struct file *file, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t off, size_t len)
{
	const struct btf *btf = bin_attr->private;

	memcpy(buf, btf->data + off, len);
	return len;
}

static void purge_cand_cache(struct btf *btf);
static int btf_module_notify(struct notifier_block *nb, unsigned long op,
			     void *module)
{
	struct btf_module *btf_mod, *tmp;
	struct module *mod = module;
	struct btf *btf;
	int err = 0;

	if (mod->btf_data_size == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_LIVE &&
	     op != MODULE_STATE_GOING))
		goto out;

	switch (op) {
	case MODULE_STATE_COMING:
		btf_mod = kzalloc(sizeof(*btf_mod), GFP_KERNEL);
		if (!btf_mod) {
			err = -ENOMEM;
			goto out;
		}
		btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size);
		if (IS_ERR(btf)) {
			kfree(btf_mod);
			if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) {
				pr_warn("failed to validate module [%s] BTF: %ld\n",
					mod->name, PTR_ERR(btf));
				err = PTR_ERR(btf);
			} else {
				pr_warn_once("Kernel module BTF mismatch detected, BTF debug info may be unavailable for some modules\n");
			}
			goto out;
		}
		err = btf_alloc_id(btf);
		if (err) {
			btf_free(btf);
			kfree(btf_mod);
			goto out;
		}

		purge_cand_cache(NULL);
		mutex_lock(&btf_module_mutex);
		btf_mod->module = module;
		btf_mod->btf = btf;
		list_add(&btf_mod->list, &btf_modules);
		mutex_unlock(&btf_module_mutex);

		if (IS_ENABLED(CONFIG_SYSFS)) {
			struct bin_attribute *attr;

			attr = kzalloc(sizeof(*attr), GFP_KERNEL);
			if (!attr)
				goto out;

			sysfs_bin_attr_init(attr);
			attr->attr.name = btf->name;
			attr->attr.mode = 0444;
			attr->size = btf->data_size;
			attr->private = btf;
			attr->read = btf_module_read;

			err = sysfs_create_bin_file(btf_kobj, attr);
			if (err) {
				pr_warn("failed to register module [%s] BTF in sysfs: %d\n",
					mod->name, err);
				kfree(attr);
				err = 0;
				goto out;
			}

			btf_mod->sysfs_attr = attr;
		}

		break;
	case MODULE_STATE_LIVE:
		mutex_lock(&btf_module_mutex);
		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
			if (btf_mod->module != module)
				continue;

			btf_mod->flags |= BTF_MODULE_F_LIVE;
			break;
		}
		mutex_unlock(&btf_module_mutex);
		break;
	case MODULE_STATE_GOING:
		mutex_lock(&btf_module_mutex);
		list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
			if (btf_mod->module != module)
				continue;

			list_del(&btf_mod->list);
			if (btf_mod->sysfs_attr)
				sysfs_remove_bin_file(btf_kobj, btf_mod->sysfs_attr);
			purge_cand_cache(btf_mod->btf);
			btf_put(btf_mod->btf);
			kfree(btf_mod->sysfs_attr);
			kfree(btf_mod);
			break;
		}
		mutex_unlock(&btf_module_mutex);
		break;
	}
out:
	return notifier_from_errno(err);
}

static struct notifier_block btf_module_nb = {
	.notifier_call = btf_module_notify,
};

static int __init btf_module_init(void)
{
	register_module_notifier(&btf_module_nb);
	return 0;
}

fs_initcall(btf_module_init);
#endif /* CONFIG_DEBUG_INFO_BTF_MODULES */
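
/* Note (descriptive, not from the original source): the bin_attribute set up
 * in btf_module_notify() above is what exposes a module's raw BTF as
 * /sys/kernel/btf/<module>; the corresponding /sys/kernel/btf/vmlinux file is
 * registered separately in kernel/bpf/sysfs_btf.c.
 */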
struct module *btf_try_get_module(const struct btf *btf)
{
	struct module *res = NULL;
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	struct btf_module *btf_mod, *tmp;

	mutex_lock(&btf_module_mutex);
	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
		if (btf_mod->btf != btf)
			continue;

		/* We must only consider module whose __init routine has
		 * finished, hence we must check for BTF_MODULE_F_LIVE flag,
		 * which is set from the notifier callback for
		 * MODULE_STATE_LIVE.
		 */
		if ((btf_mod->flags & BTF_MODULE_F_LIVE) && try_module_get(btf_mod->module))
			res = btf_mod->module;

		break;
	}
	mutex_unlock(&btf_module_mutex);
#endif

	return res;
}
/* Returns struct btf corresponding to the struct module.
 * This function can return NULL or ERR_PTR.
 */
static struct btf *btf_get_module_btf(const struct module *module)
{
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	struct btf_module *btf_mod, *tmp;
#endif
	struct btf *btf = NULL;

	if (!module) {
		btf = bpf_get_btf_vmlinux();
		if (!IS_ERR_OR_NULL(btf))
			btf_get(btf);
		return btf;
	}

#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
	mutex_lock(&btf_module_mutex);
	list_for_each_entry_safe(btf_mod, tmp, &btf_modules, list) {
		if (btf_mod->module != module)
			continue;

		btf_get(btf_mod->btf);
		btf = btf_mod->btf;
		break;
	}
	mutex_unlock(&btf_module_mutex);
#endif

	return btf;
}
BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{
	struct btf *btf = NULL;
	int btf_obj_fd = 0;
	long ret;

	if (flags)
		return -EINVAL;

	if (name_sz <= 1 || name[name_sz - 1])
		return -EINVAL;

	ret = bpf_find_btf_id(name, kind, &btf);
	if (ret > 0 && btf_is_module(btf)) {
		/* module BTFs require a new FD referencing the module BTF obj */
		btf_obj_fd = __btf_new_fd(btf);
		if (btf_obj_fd < 0) {
			btf_put(btf);
			return btf_obj_fd;
		}
		return ret | (((u64)btf_obj_fd) << 32);
	}
	if (ret > 0)
		btf_put(btf);
	return ret;
}

const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
	.func		= bpf_btf_find_by_name_kind,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
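
/* Note on the return value layout (restating the code above, not new ABI):
 * for vmlinux types the helper returns just the BTF type id; for module types
 * the low 32 bits carry the type id and the upper 32 bits carry a new FD
 * referencing the module BTF object. From a BPF syscall program the result
 * would be split roughly as (illustrative sketch only):
 *
 *     long res = bpf_btf_find_by_name_kind(name, sizeof(name), BTF_KIND_STRUCT, 0);
 *     int btf_id = (int)res;
 *     int btf_fd = res >> 32;
 */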
BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
				 const struct btf_type *func, u32 func_flags)
{
	u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
	const char *name, *sfx, *iter_name;
	const struct btf_param *arg;
	const struct btf_type *t;
	char exp_name[128];
	u32 nr_args;

	/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
	if (!flags || (flags & (flags - 1)))
		return -EINVAL;

	/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
	nr_args = btf_type_vlen(func);
	if (nr_args < 1)
		return -EINVAL;

	arg = &btf_params(func)[0];
	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!t || !btf_type_is_ptr(t))
		return -EINVAL;
	t = btf_type_skip_modifiers(btf, t->type, NULL);
	if (!t || !__btf_type_is_struct(t))
		return -EINVAL;

	name = btf_name_by_offset(btf, t->name_off);
	if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
		return -EINVAL;

	/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
	 * fit nicely in stack slots
	 */
	if (t->size == 0 || (t->size % 8))
		return -EINVAL;

	/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
	 * naming pattern
	 */
	iter_name = name + sizeof(ITER_PREFIX) - 1;
	if (flags & KF_ITER_NEW)
		sfx = "new";
	else if (flags & KF_ITER_NEXT)
		sfx = "next";
	else /* (flags & KF_ITER_DESTROY) */
		sfx = "destroy";

	snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
	if (strcmp(func_name, exp_name))
		return -EINVAL;

	/* only iter constructor should have extra arguments */
	if (!(flags & KF_ITER_NEW) && nr_args != 1)
		return -EINVAL;

	if (flags & KF_ITER_NEXT) {
		/* bpf_iter_<type>_next() should return pointer */
		t = btf_type_skip_modifiers(btf, func->type, NULL);
		if (!t || !btf_type_is_ptr(t))
			return -EINVAL;
	}

	if (flags & KF_ITER_DESTROY) {
		/* bpf_iter_<type>_destroy() should return void */
		t = btf_type_by_id(btf, func->type);
		if (!t || !btf_type_is_void(t))
			return -EINVAL;
	}

	return 0;
}
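
/* Example of a kfunc triple that satisfies the checks above (the numeric
 * iterator from kernel/bpf/bpf_iter.c is used purely to illustrate the naming
 * and prototype pattern):
 *
 *     struct bpf_iter_num { __u64 __opaque[1]; } __attribute__((aligned(8)));
 *     int  bpf_iter_num_new(struct bpf_iter_num *it, int start, int end);
 *     int *bpf_iter_num_next(struct bpf_iter_num *it);     // returns pointer
 *     void bpf_iter_num_destroy(struct bpf_iter_num *it);  // returns void
 *
 * Only the constructor (KF_ITER_NEW) may take arguments beyond the iterator
 * pointer itself.
 */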
static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
{
	const struct btf_type *func;
	const char *func_name;
	int err;

	/* any kfunc should be FUNC -> FUNC_PROTO */
	func = btf_type_by_id(btf, func_id);
	if (!func || !btf_type_is_func(func))
		return -EINVAL;

	/* sanity check kfunc name */
	func_name = btf_name_by_offset(btf, func->name_off);
	if (!func_name || !func_name[0])
		return -EINVAL;

	func = btf_type_by_id(btf, func->type);
	if (!func || !btf_type_is_func_proto(func))
		return -EINVAL;

	if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
		err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
		if (err)
			return err;
	}

	return 0;
}
/* Kernel Function (kfunc) BTF ID set registration API */

static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
				  const struct btf_kfunc_id_set *kset)
{
	struct btf_kfunc_hook_filter *hook_filter;
	struct btf_id_set8 *add_set = kset->set;
	bool vmlinux_set = !btf_is_module(btf);
	bool add_filter = !!kset->filter;
	struct btf_kfunc_set_tab *tab;
	struct btf_id_set8 *set;
	u32 set_cnt;
	int ret;

	if (hook >= BTF_KFUNC_HOOK_MAX) {
		ret = -EINVAL;
		goto end;
	}

	tab = btf->kfunc_set_tab;

	if (tab && add_filter) {
		u32 i;

		hook_filter = &tab->hook_filters[hook];
		for (i = 0; i < hook_filter->nr_filters; i++) {
			if (hook_filter->filters[i] == kset->filter) {
				add_filter = false;
				break;
			}
		}

		if (add_filter && hook_filter->nr_filters == BTF_KFUNC_FILTER_MAX_CNT) {
			ret = -E2BIG;
			goto end;
		}
	}

	if (!tab) {
		tab = kzalloc(sizeof(*tab), GFP_KERNEL | __GFP_NOWARN);
		if (!tab)
			return -ENOMEM;
		btf->kfunc_set_tab = tab;
	}

	set = tab->sets[hook];
	/* Warn when register_btf_kfunc_id_set is called twice for the same hook
	 * for module sets.
	 */
	if (WARN_ON_ONCE(set && !vmlinux_set)) {
		ret = -EINVAL;
		goto end;
	}

	/* We don't need to allocate, concatenate, and sort module sets, because
	 * only one is allowed per hook. Hence, we can directly assign the
	 * pointer and return.
	 */
	if (!vmlinux_set) {
		tab->sets[hook] = add_set;
		goto do_add_filter;
	}

	/* In case of vmlinux sets, there may be more than one set being
	 * registered per hook. To create a unified set, we allocate a new set
	 * and concatenate all individual sets being registered. While each set
	 * is individually sorted, they may become unsorted when concatenated,
	 * hence re-sorting the final set again is required to make binary
	 * searching the set using btf_id_set8_contains function work.
	 */
	set_cnt = set ? set->cnt : 0;

	if (set_cnt > U32_MAX - add_set->cnt) {
		ret = -EOVERFLOW;
		goto end;
	}

	if (set_cnt + add_set->cnt > BTF_KFUNC_SET_MAX_CNT) {
		ret = -E2BIG;
		goto end;
	}

	/* Grow set */
	set = krealloc(tab->sets[hook],
		       offsetof(struct btf_id_set8, pairs[set_cnt + add_set->cnt]),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!set) {
		ret = -ENOMEM;
		goto end;
	}

	/* For newly allocated set, initialize set->cnt to 0 */
	if (!tab->sets[hook])
		set->cnt = 0;
	tab->sets[hook] = set;

	/* Concatenate the two sets */
	memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0]));
	set->cnt += add_set->cnt;

	sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL);

do_add_filter:
	if (add_filter) {
		hook_filter = &tab->hook_filters[hook];
		hook_filter->filters[hook_filter->nr_filters++] = kset->filter;
	}
	return 0;
end:
	btf_free_kfunc_set_tab(btf);
	return ret;
}
static u32 *__btf_kfunc_id_set_contains(const struct btf *btf,
					enum btf_kfunc_hook hook,
					u32 kfunc_btf_id,
					const struct bpf_prog *prog)
{
	struct btf_kfunc_hook_filter *hook_filter;
	struct btf_id_set8 *set;
	u32 *id, i;

	if (hook >= BTF_KFUNC_HOOK_MAX)
		return NULL;
	if (!btf->kfunc_set_tab)
		return NULL;
	hook_filter = &btf->kfunc_set_tab->hook_filters[hook];
	for (i = 0; i < hook_filter->nr_filters; i++) {
		if (hook_filter->filters[i](prog, kfunc_btf_id))
			return NULL;
	}
	set = btf->kfunc_set_tab->sets[hook];
	if (!set)
		return NULL;
	id = btf_id_set8_contains(set, kfunc_btf_id);
	if (!id)
		return NULL;
	/* The flags for BTF ID are located next to it */
	return id + 1;
}
static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
{
	switch (prog_type) {
	case BPF_PROG_TYPE_UNSPEC:
		return BTF_KFUNC_HOOK_COMMON;
	case BPF_PROG_TYPE_XDP:
		return BTF_KFUNC_HOOK_XDP;
	case BPF_PROG_TYPE_SCHED_CLS:
		return BTF_KFUNC_HOOK_TC;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return BTF_KFUNC_HOOK_STRUCT_OPS;
	case BPF_PROG_TYPE_TRACING:
	case BPF_PROG_TYPE_LSM:
		return BTF_KFUNC_HOOK_TRACING;
	case BPF_PROG_TYPE_SYSCALL:
		return BTF_KFUNC_HOOK_SYSCALL;
	case BPF_PROG_TYPE_CGROUP_SKB:
		return BTF_KFUNC_HOOK_CGROUP_SKB;
	case BPF_PROG_TYPE_SCHED_ACT:
		return BTF_KFUNC_HOOK_SCHED_ACT;
	case BPF_PROG_TYPE_SK_SKB:
		return BTF_KFUNC_HOOK_SK_SKB;
	case BPF_PROG_TYPE_SOCKET_FILTER:
		return BTF_KFUNC_HOOK_SOCKET_FILTER;
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
		return BTF_KFUNC_HOOK_LWT;
	case BPF_PROG_TYPE_NETFILTER:
		return BTF_KFUNC_HOOK_NETFILTER;
	default:
		return BTF_KFUNC_HOOK_MAX;
	}
}
/* Caution:
 * Reference to the module (obtained using btf_try_get_module) corresponding to
 * the struct btf *MUST* be held when calling this function from verifier
 * context. This is usually true as we stash references in prog's kfunc_btf_tab;
 * keeping the reference for the duration of the call provides the necessary
 * protection for looking up a well-formed btf->kfunc_set_tab.
 */
u32 *btf_kfunc_id_set_contains(const struct btf *btf,
			       u32 kfunc_btf_id,
			       const struct bpf_prog *prog)
{
	enum bpf_prog_type prog_type = resolve_prog_type(prog);
	enum btf_kfunc_hook hook;
	u32 *kfunc_flags;

	kfunc_flags = __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_COMMON, kfunc_btf_id, prog);
	if (kfunc_flags)
		return kfunc_flags;

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	return __btf_kfunc_id_set_contains(btf, hook, kfunc_btf_id, prog);
}

u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
				const struct bpf_prog *prog)
{
	return __btf_kfunc_id_set_contains(btf, BTF_KFUNC_HOOK_FMODRET, kfunc_btf_id, prog);
}
static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
				       const struct btf_kfunc_id_set *kset)
{
	struct btf *btf;
	int ret, i;

	btf = btf_get_module_btf(kset->owner);
	if (!btf) {
		if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register kfuncs\n");
			return -ENOENT;
		}
		if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
			pr_warn("missing module BTF, cannot register kfuncs\n");
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	for (i = 0; i < kset->set->cnt; i++) {
		ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id,
					     kset->set->pairs[i].flags);
		if (ret)
			goto err_out;
	}

	ret = btf_populate_kfunc_set(btf, hook, kset);

err_out:
	btf_put(btf);
	return ret;
}

/* This function must be invoked only from initcalls/module init functions */
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
			      const struct btf_kfunc_id_set *kset)
{
	enum btf_kfunc_hook hook;

	hook = bpf_prog_type_to_kfunc_hook(prog_type);
	return __register_btf_kfunc_id_set(hook, kset);
}
EXPORT_SYMBOL_GPL(register_btf_kfunc_id_set);

/* This function must be invoked only from initcalls/module init functions */
int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
{
	return __register_btf_kfunc_id_set(BTF_KFUNC_HOOK_FMODRET, kset);
}
EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
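
/* Registration sketch for the API above. A subsystem typically builds a BTF
 * ID set8 with the BTF_SET8/BTF_ID_FLAGS macros and registers it from an
 * initcall; the identifiers below are placeholders, not existing kernel
 * symbols:
 *
 *     BTF_SET8_START(example_kfunc_ids)
 *     BTF_ID_FLAGS(func, bpf_example_kfunc, KF_TRUSTED_ARGS)
 *     BTF_SET8_END(example_kfunc_ids)
 *
 *     static const struct btf_kfunc_id_set example_kfunc_set = {
 *             .owner = THIS_MODULE,
 *             .set   = &example_kfunc_ids,
 *     };
 *
 *     ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &example_kfunc_set);
 */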
s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
{
	struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
	struct btf_id_dtor_kfunc *dtor;

	if (!tab)
		return -ENOENT;

	/* Even though the size of tab->dtors[0] is > sizeof(u32), we only need
	 * to compare the first u32 with btf_id, so we can reuse btf_id_cmp_func.
	 */
	BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
	dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
	if (!dtor)
		return -ENOENT;
	return dtor->kfunc_btf_id;
}
static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc *dtors, u32 cnt)
{
	const struct btf_type *dtor_func, *dtor_func_proto, *t;
	const struct btf_param *args;
	s32 dtor_btf_id;
	u32 nr_args, i;

	for (i = 0; i < cnt; i++) {
		dtor_btf_id = dtors[i].kfunc_btf_id;

		dtor_func = btf_type_by_id(btf, dtor_btf_id);
		if (!dtor_func || !btf_type_is_func(dtor_func))
			return -EINVAL;

		dtor_func_proto = btf_type_by_id(btf, dtor_func->type);
		if (!dtor_func_proto || !btf_type_is_func_proto(dtor_func_proto))
			return -EINVAL;

		/* Make sure the prototype of the destructor kfunc is 'void func(type *)' */
		t = btf_type_by_id(btf, dtor_func_proto->type);
		if (!t || !btf_type_is_void(t))
			return -EINVAL;

		nr_args = btf_type_vlen(dtor_func_proto);
		if (nr_args != 1)
			return -EINVAL;
		args = btf_params(dtor_func_proto);
		t = btf_type_by_id(btf, args[0].type);
		/* Allow any pointer type, as width on targets Linux supports
		 * will be same for all pointer types (i.e. sizeof(void *))
		 */
		if (!t || !btf_type_is_ptr(t))
			return -EINVAL;
	}
	return 0;
}
/* This function must be invoked only from initcalls/module init functions */
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
				struct module *owner)
{
	struct btf_id_dtor_kfunc_tab *tab;
	struct btf *btf;
	u32 tab_cnt;
	int ret;

	btf = btf_get_module_btf(owner);
	if (!btf) {
		if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
			pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n");
			return -ENOENT;
		}
		if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) {
			pr_err("missing module BTF, cannot register dtor kfuncs\n");
			return -ENOENT;
		}
		return 0;
	}
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	if (add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
		ret = -E2BIG;
		goto end;
	}

	/* Ensure that the prototype of dtor kfuncs being registered is sane */
	ret = btf_check_dtor_kfuncs(btf, dtors, add_cnt);
	if (ret < 0)
		goto end;

	tab = btf->dtor_kfunc_tab;
	/* Only one call allowed for modules */
	if (WARN_ON_ONCE(tab && btf_is_module(btf))) {
		ret = -EINVAL;
		goto end;
	}

	tab_cnt = tab ? tab->cnt : 0;
	if (tab_cnt > U32_MAX - add_cnt) {
		ret = -EOVERFLOW;
		goto end;
	}
	if (tab_cnt + add_cnt >= BTF_DTOR_KFUNC_MAX_CNT) {
		pr_err("cannot register more than %d kfunc destructors\n", BTF_DTOR_KFUNC_MAX_CNT);
		ret = -E2BIG;
		goto end;
	}

	tab = krealloc(btf->dtor_kfunc_tab,
		       offsetof(struct btf_id_dtor_kfunc_tab, dtors[tab_cnt + add_cnt]),
		       GFP_KERNEL | __GFP_NOWARN);
	if (!tab) {
		ret = -ENOMEM;
		goto end;
	}

	if (!btf->dtor_kfunc_tab)
		tab->cnt = 0;
	btf->dtor_kfunc_tab = tab;

	memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0]));
	tab->cnt += add_cnt;

	sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL);

end:
	if (ret)
		btf_free_dtor_kfunc_tab(btf);
	btf_put(btf);
	return ret;
}
EXPORT_SYMBOL_GPL(register_btf_id_dtor_kfuncs);
#define MAX_TYPES_ARE_COMPAT_DEPTH 2

/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
 *     kind should match for local and target types (i.e., STRUCT is not
 *     compatible with UNION);
 *   - for ENUMs/ENUM64s, the size is ignored;
 *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - CONST/VOLATILE/RESTRICT modifiers are ignored;
 *   - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 *   - FUNC_PROTOs are compatible if they have compatible signature: same
 *     number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
					   MAX_TYPES_ARE_COMPAT_DEPTH);
}
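
/* Concretely, under the rules above an 'int' local type is considered
 * compatible with a 'long' target (INT size and signedness are ignored), and
 * 'const volatile int *' matches 'int *' (modifiers ignored, PTR followed),
 * while a STRUCT never matches a UNION even if the members line up.
 */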
#define MAX_TYPES_MATCH_DEPTH 2

int bpf_core_types_match(const struct btf *local_btf, u32 local_id,
			 const struct btf *targ_btf, u32 targ_id)
{
	return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false,
				      MAX_TYPES_MATCH_DEPTH);
}
static bool bpf_core_is_flavor_sep(const char *s)
{
	/* check X___Y name pattern, where X and Y are not underscores */
	return s[0] != '_' &&				      /* X */
	       s[1] == '_' && s[2] == '_' && s[3] == '_' &&   /* ___ */
	       s[4] != '_';				      /* Y */
}

size_t bpf_core_essential_name_len(const char *name)
{
	size_t n = strlen(name);
	int i;

	for (i = n - 5; i >= 0; i--) {
		if (bpf_core_is_flavor_sep(name + i))
			return i + 1;
	}
	return n;
}
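
/* Example: for the flavored name "task_struct___2" the essential length is
 * 11, i.e. only "task_struct" is compared against target candidates; a name
 * without the X___Y pattern keeps its full length.
 */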
struct bpf_cand_cache {
	const char *name;
	u32 name_len;
	u16 kind;
	u16 cnt;
	struct {
		const struct btf *btf;
		u32 id;
	} cands[];
};

static void bpf_free_cands(struct bpf_cand_cache *cands)
{
	if (!cands->cnt)
		/* empty candidate array was allocated on stack */
		return;
	kfree(cands);
}

static void bpf_free_cands_from_cache(struct bpf_cand_cache *cands)
{
	kfree(cands->name);
	kfree(cands);
}

#define VMLINUX_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *vmlinux_cand_cache[VMLINUX_CAND_CACHE_SIZE];

#define MODULE_CAND_CACHE_SIZE 31
static struct bpf_cand_cache *module_cand_cache[MODULE_CAND_CACHE_SIZE];

static DEFINE_MUTEX(cand_cache_mutex);
static void __print_cand_cache(struct bpf_verifier_log *log,
			       struct bpf_cand_cache **cache,
			       int cache_size)
{
	struct bpf_cand_cache *cc;
	int i, j;

	for (i = 0; i < cache_size; i++) {
		cc = cache[i];
		if (!cc)
			continue;
		bpf_log(log, "[%d]%s(", i, cc->name);
		for (j = 0; j < cc->cnt; j++) {
			bpf_log(log, "%d", cc->cands[j].id);
			if (j < cc->cnt - 1)
				bpf_log(log, " ");
		}
		bpf_log(log, "), ");
	}
}

static void print_cand_cache(struct bpf_verifier_log *log)
{
	mutex_lock(&cand_cache_mutex);
	bpf_log(log, "vmlinux_cand_cache:");
	__print_cand_cache(log, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	bpf_log(log, "\nmodule_cand_cache:");
	__print_cand_cache(log, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	bpf_log(log, "\n");
	mutex_unlock(&cand_cache_mutex);
}
static u32 hash_cands(struct bpf_cand_cache *cands)
{
	return jhash(cands->name, cands->name_len, 0);
}
static struct bpf_cand_cache *check_cand_cache(struct bpf_cand_cache *cands,
					       struct bpf_cand_cache **cache,
					       int cache_size)
{
	struct bpf_cand_cache *cc = cache[hash_cands(cands) % cache_size];

	if (cc && cc->name_len == cands->name_len &&
	    !strncmp(cc->name, cands->name, cands->name_len))
		return cc;
	return NULL;
}

static size_t sizeof_cands(int cnt)
{
	return offsetof(struct bpf_cand_cache, cands[cnt]);
}
static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
						  struct bpf_cand_cache **cache,
						  int cache_size)
{
	struct bpf_cand_cache **cc = &cache[hash_cands(cands) % cache_size], *new_cands;

	if (*cc) {
		bpf_free_cands_from_cache(*cc);
		*cc = NULL;
	}
	new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
	if (!new_cands) {
		bpf_free_cands(cands);
		return ERR_PTR(-ENOMEM);
	}
	/* strdup the name, since it will stay in cache.
	 * the cands->name points to strings in prog's BTF and the prog can be unloaded.
	 */
	new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
	bpf_free_cands(cands);
	if (!new_cands->name) {
		kfree(new_cands);
		return ERR_PTR(-ENOMEM);
	}
	*cc = new_cands;
	return new_cands;
}
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
static void __purge_cand_cache(struct btf *btf, struct bpf_cand_cache **cache,
			       int cache_size)
{
	struct bpf_cand_cache *cc;
	int i, j;

	for (i = 0; i < cache_size; i++) {
		cc = cache[i];
		if (!cc)
			continue;
		if (!btf) {
			/* when new module is loaded purge all of module_cand_cache,
			 * since new module might have candidates with the name
			 * that matches cached cands.
			 */
			bpf_free_cands_from_cache(cc);
			cache[i] = NULL;
			continue;
		}
		/* when module is unloaded purge cache entries
		 * that match module's btf
		 */
		for (j = 0; j < cc->cnt; j++)
			if (cc->cands[j].btf == btf) {
				bpf_free_cands_from_cache(cc);
				cache[i] = NULL;
				break;
			}
	}
}

static void purge_cand_cache(struct btf *btf)
{
	mutex_lock(&cand_cache_mutex);
	__purge_cand_cache(btf, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	mutex_unlock(&cand_cache_mutex);
}
#endif
static struct bpf_cand_cache *
bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
		   int targ_start_id)
{
	struct bpf_cand_cache *new_cands;
	const struct btf_type *t;
	const char *targ_name;
	size_t targ_essent_len;
	int n, i;

	n = btf_nr_types(targ_btf);
	for (i = targ_start_id; i < n; i++) {
		t = btf_type_by_id(targ_btf, i);
		if (btf_kind(t) != cands->kind)
			continue;

		targ_name = btf_name_by_offset(targ_btf, t->name_off);
		if (!targ_name)
			continue;

		/* the resched point is before strncmp to make sure that search
		 * for non-existing name will have a chance to schedule().
		 */
		cond_resched();

		if (strncmp(cands->name, targ_name, cands->name_len) != 0)
			continue;

		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != cands->name_len)
			continue;

		/* most of the time there is only one candidate for a given kind+name pair */
		new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
		if (!new_cands) {
			bpf_free_cands(cands);
			return ERR_PTR(-ENOMEM);
		}

		memcpy(new_cands, cands, sizeof_cands(cands->cnt));
		bpf_free_cands(cands);
		cands = new_cands;
		cands->cands[cands->cnt].btf = targ_btf;
		cands->cands[cands->cnt].id = i;
		cands->cnt++;
	}
	return cands;
}
static struct bpf_cand_cache *
bpf_core_find_cands(struct bpf_core_ctx *ctx, u32 local_type_id)
{
	struct bpf_cand_cache *cands, *cc, local_cand = {};
	const struct btf *local_btf = ctx->btf;
	const struct btf_type *local_type;
	const struct btf *main_btf;
	size_t local_essent_len;
	struct btf *mod_btf;
	const char *name;
	int id;

	main_btf = bpf_get_btf_vmlinux();
	if (IS_ERR(main_btf))
		return ERR_CAST(main_btf);
	if (!main_btf)
		return ERR_PTR(-EINVAL);

	local_type = btf_type_by_id(local_btf, local_type_id);
	if (!local_type)
		return ERR_PTR(-EINVAL);

	name = btf_name_by_offset(local_btf, local_type->name_off);
	if (str_is_empty(name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(name);

	cands = &local_cand;
	cands->name = name;
	cands->kind = btf_kind(local_type);
	cands->name_len = local_essent_len;

	cc = check_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	/* cands is a pointer to stack here */
	if (cc) {
		if (cc->cnt)
			return cc;
		goto check_modules;
	}

	/* Attempt to find target candidates in vmlinux BTF first */
	cands = bpf_core_add_cands(cands, main_btf, 1);
	if (IS_ERR(cands))
		return ERR_CAST(cands);

	/* cands is a pointer to kmalloced memory here if cands->cnt > 0 */

	/* populate cache even when cands->cnt == 0 */
	cc = populate_cand_cache(cands, vmlinux_cand_cache, VMLINUX_CAND_CACHE_SIZE);
	if (IS_ERR(cc))
		return ERR_CAST(cc);

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cc->cnt)
		return cc;

check_modules:
	/* cands is a pointer to stack here and cands->cnt == 0 */
	cc = check_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
	if (cc)
		/* if cache has it return it even if cc->cnt == 0 */
		return cc;

	/* If candidate is not found in vmlinux's BTF then search in module's BTFs */
	spin_lock_bh(&btf_idr_lock);
	idr_for_each_entry(&btf_idr, mod_btf, id) {
		if (!btf_is_module(mod_btf))
			continue;
		/* linear search could be slow hence unlock/lock
		 * the IDR to avoiding holding it for too long
		 */
		btf_get(mod_btf);
		spin_unlock_bh(&btf_idr_lock);
		cands = bpf_core_add_cands(cands, mod_btf, btf_nr_types(main_btf));
		btf_put(mod_btf);
		if (IS_ERR(cands))
			return ERR_CAST(cands);
		spin_lock_bh(&btf_idr_lock);
	}
	spin_unlock_bh(&btf_idr_lock);
	/* cands is a pointer to kmalloced memory here if cands->cnt > 0
	 * or pointer to stack if cands->cnt == 0.
	 * Copy it into the cache even when cands->cnt == 0 and
	 * return the result.
	 */
	return populate_cand_cache(cands, module_cand_cache, MODULE_CAND_CACHE_SIZE);
}
int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn)
{
	bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL;
	struct bpf_core_cand_list cands = {};
	struct bpf_core_relo_res targ_res;
	struct bpf_core_spec *specs;
	int err;

	/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
	 * into arrays of btf_ids of struct fields and array indices.
	 */
	specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
	if (!specs)
		return -ENOMEM;

	if (need_cands) {
		struct bpf_cand_cache *cc;
		int i;

		mutex_lock(&cand_cache_mutex);
		cc = bpf_core_find_cands(ctx, relo->type_id);
		if (IS_ERR(cc)) {
			bpf_log(ctx->log, "target candidate search failed for %d\n",
				relo->type_id);
			err = PTR_ERR(cc);
			goto out;
		}
		if (cc->cnt) {
			cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
			if (!cands.cands) {
				err = -ENOMEM;
				goto out;
			}
		}
		for (i = 0; i < cc->cnt; i++) {
			bpf_log(ctx->log,
				"CO-RE relocating %s %s: found target candidate [%d]\n",
				btf_kind_str[cc->kind], cc->name, cc->cands[i].id);
			cands.cands[i].btf = cc->cands[i].btf;
			cands.cands[i].id = cc->cands[i].id;
		}
		cands.len = cc->cnt;
		/* cand_cache_mutex needs to span the cache lookup and
		 * copy of btf pointer into bpf_core_cand_list,
		 * since module can be unloaded while bpf_core_calc_relo_insn
		 * is working with module's btf.
		 */
	}

	err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs,
				      &targ_res);
	if (err)
		goto out;

	err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx,
				  &targ_res);

out:
	kfree(specs);
	if (need_cands) {
		kfree(cands.cands);
		mutex_unlock(&cand_cache_mutex);
		if (ctx->log->level & BPF_LOG_LEVEL2)
			print_cand_cache(ctx->log);
	}
	return err;
}
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
				const struct bpf_reg_state *reg,
				const char *field_name, u32 btf_id, const char *suffix)
{
	struct btf *btf = reg->btf;
	const struct btf_type *walk_type, *safe_type;
	const char *tname;
	char safe_tname[64];
	long ret, safe_id;
	const struct btf_member *member;
	u32 i;

	walk_type = btf_type_by_id(btf, reg->btf_id);
	if (!walk_type)
		return false;

	tname = btf_name_by_offset(btf, walk_type->name_off);

	ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix);
	if (ret >= sizeof(safe_tname))
		return false;

	safe_id = btf_find_by_name_kind(btf, safe_tname, BTF_INFO_KIND(walk_type->info));
	if (safe_id < 0)
		return false;

	safe_type = btf_type_by_id(btf, safe_id);
	if (!safe_type)
		return false;

	for_each_member(i, safe_type, member) {
		const char *m_name = __btf_name_by_offset(btf, member->name_off);
		const struct btf_type *mtype = btf_type_by_id(btf, member->type);
		u32 id;

		if (!btf_type_is_ptr(mtype))
			continue;

		btf_type_skip_modifiers(btf, mtype->type, &id);
		/* If we match on both type and name, the field is considered trusted. */
		if (btf_id == id && !strcmp(field_name, m_name))
			return true;
	}

	return false;
}
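
/* Illustration of the suffix convention (descriptive, not from the original
 * source): with a suffix such as "__safe_trusted", walking a field of
 * 'struct task_struct' makes the code above look for a BTF type named
 * 'task_struct__safe_trusted' and treat a pointer member that matches both
 * the field name and the pointee BTF ID as trusted. The concrete allow-list
 * structs live with the verifier, not in this file; the name here only shows
 * the string that the snprintf() above builds.
 */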
bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
			       const struct btf *reg_btf, u32 reg_id,
			       const struct btf *arg_btf, u32 arg_id)
{
	const char *reg_name, *arg_name, *search_needle;
	const struct btf_type *reg_type, *arg_type;
	int reg_len, arg_len, cmp_len;
	size_t pattern_len = sizeof(NOCAST_ALIAS_SUFFIX) - sizeof(char);

	reg_type = btf_type_by_id(reg_btf, reg_id);
	if (!reg_type)
		return false;

	arg_type = btf_type_by_id(arg_btf, arg_id);
	if (!arg_type)
		return false;

	reg_name = btf_name_by_offset(reg_btf, reg_type->name_off);
	arg_name = btf_name_by_offset(arg_btf, arg_type->name_off);

	reg_len = strlen(reg_name);
	arg_len = strlen(arg_name);

	/* Exactly one of the two type names may be suffixed with ___init, so
	 * if the strings are the same size, they can't possibly be no-cast
	 * aliases of one another. If you have two of the same type names, e.g.
	 * they're both nf_conn___init, it would be improper to return true
	 * because they are _not_ no-cast aliases, they are the same type.
	 */
	if (reg_len == arg_len)
		return false;

	/* Either of the two names must be the other name, suffixed with ___init. */
	if ((reg_len != arg_len + pattern_len) &&
	    (arg_len != reg_len + pattern_len))
		return false;

	if (reg_len < arg_len) {
		search_needle = strstr(arg_name, NOCAST_ALIAS_SUFFIX);
		cmp_len = reg_len;
	} else {
		search_needle = strstr(reg_name, NOCAST_ALIAS_SUFFIX);
		cmp_len = arg_len;
	}

	if (!search_needle)
		return false;

	/* ___init suffix must come at the end of the name */
	if (*(search_needle + pattern_len) != '\0')
		return false;

	return !strncmp(reg_name, arg_name, cmp_len);
}
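
/* Example from the comment above: 'nf_conn___init' is a no-cast alias of
 * 'nf_conn' (same essential name plus the "___init" suffix), so a register
 * typed as one may be passed where the other is expected; two registers that
 * are both 'nf_conn___init' are simply the same type and are not aliases.
 */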