// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */
11 #include <objtool/builtin.h>
12 #include <objtool/cfi.h>
13 #include <objtool/arch.h>
14 #include <objtool/check.h>
15 #include <objtool/special.h>
16 #include <objtool/warn.h>
17 #include <objtool/endianness.h>
19 #include <linux/objtool_types.h>
20 #include <linux/hashtable.h>
21 #include <linux/kernel.h>
22 #include <linux/static_call_types.h>
23 #include <linux/string.h>
26 struct alternative
*next
;
27 struct instruction
*insn
;
30 static unsigned long nr_cfi
, nr_cfi_reused
, nr_cfi_cache
;
32 static struct cfi_init_state initial_func_cfi
;
33 static struct cfi_state init_cfi
;
34 static struct cfi_state func_cfi
;
35 static struct cfi_state force_undefined_cfi
;
37 struct instruction
*find_insn(struct objtool_file
*file
,
38 struct section
*sec
, unsigned long offset
)
40 struct instruction
*insn
;
42 hash_for_each_possible(file
->insn_hash
, insn
, hash
, sec_offset_hash(sec
, offset
)) {
43 if (insn
->sec
== sec
&& insn
->offset
== offset
)
50 struct instruction
*next_insn_same_sec(struct objtool_file
*file
,
51 struct instruction
*insn
)
53 if (insn
->idx
== INSN_CHUNK_MAX
)
54 return find_insn(file
, insn
->sec
, insn
->offset
+ insn
->len
);
63 static struct instruction
*next_insn_same_func(struct objtool_file
*file
,
64 struct instruction
*insn
)
66 struct instruction
*next
= next_insn_same_sec(file
, insn
);
67 struct symbol
*func
= insn_func(insn
);
72 if (next
&& insn_func(next
) == func
)
75 /* Check if we're already in the subfunction: */
76 if (func
== func
->cfunc
)
79 /* Move to the subfunction: */
80 return find_insn(file
, func
->cfunc
->sec
, func
->cfunc
->offset
);
83 static struct instruction
*prev_insn_same_sec(struct objtool_file
*file
,
84 struct instruction
*insn
)
88 return find_insn(file
, insn
->sec
, insn
->offset
- insn
->prev_len
);
95 static struct instruction
*prev_insn_same_sym(struct objtool_file
*file
,
96 struct instruction
*insn
)
98 struct instruction
*prev
= prev_insn_same_sec(file
, insn
);
100 if (prev
&& insn_func(prev
) == insn_func(insn
))
106 #define for_each_insn(file, insn) \
107 for (struct section *__sec, *__fake = (struct section *)1; \
108 __fake; __fake = NULL) \
109 for_each_sec(file, __sec) \
110 sec_for_each_insn(file, __sec, insn)
/* Iterate all instructions of @func, following into its cold subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn && insn_func(insn) == func;				\
	     insn = next_insn_same_func(file, insn))
117 #define sym_for_each_insn(file, sym, insn) \
118 for (insn = find_insn(file, sym->sec, sym->offset); \
119 insn && insn->offset < sym->offset + sym->len; \
120 insn = next_insn_same_sec(file, insn))
122 #define sym_for_each_insn_continue_reverse(file, sym, insn) \
123 for (insn = prev_insn_same_sec(file, insn); \
124 insn && insn->offset >= sym->offset; \
125 insn = prev_insn_same_sec(file, insn))
127 #define sec_for_each_insn_from(file, insn) \
128 for (; insn; insn = next_insn_same_sec(file, insn))
130 #define sec_for_each_insn_continue(file, insn) \
131 for (insn = next_insn_same_sec(file, insn); insn; \
132 insn = next_insn_same_sec(file, insn))
134 static inline struct symbol
*insn_call_dest(struct instruction
*insn
)
136 if (insn
->type
== INSN_JUMP_DYNAMIC
||
137 insn
->type
== INSN_CALL_DYNAMIC
)
140 return insn
->_call_dest
;
143 static inline struct reloc
*insn_jump_table(struct instruction
*insn
)
145 if (insn
->type
== INSN_JUMP_DYNAMIC
||
146 insn
->type
== INSN_CALL_DYNAMIC
)
147 return insn
->_jump_table
;
152 static inline unsigned long insn_jump_table_size(struct instruction
*insn
)
154 if (insn
->type
== INSN_JUMP_DYNAMIC
||
155 insn
->type
== INSN_CALL_DYNAMIC
)
156 return insn
->_jump_table_size
;
161 static bool is_jump_table_jump(struct instruction
*insn
)
163 struct alt_group
*alt_group
= insn
->alt_group
;
165 if (insn_jump_table(insn
))
168 /* Retpoline alternative for a jump table? */
169 return alt_group
&& alt_group
->orig_group
&&
170 insn_jump_table(alt_group
->orig_group
->first_insn
);
173 static bool is_sibling_call(struct instruction
*insn
)
176 * Assume only STT_FUNC calls have jump-tables.
178 if (insn_func(insn
)) {
179 /* An indirect jump is either a sibling call or a jump to a table. */
180 if (insn
->type
== INSN_JUMP_DYNAMIC
)
181 return !is_jump_table_jump(insn
);
184 /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
185 return (is_static_jump(insn
) && insn_call_dest(insn
));
/*
 * Checks if a string ends with another.
 */
static bool str_ends_with(const char *s, const char *sub)
{
	const int slen = strlen(s);
	const int sublen = strlen(sub);

	/*
	 * Without this guard, a suffix longer than the string would make
	 * the memcmp() below read before the start of @s (UB).
	 */
	if (sublen > slen)
		return 0;

	return !memcmp(s + slen - sublen, sub, sublen);
}
203 * Checks if a function is a Rust "noreturn" one.
205 static bool is_rust_noreturn(const struct symbol
*func
)
208 * If it does not start with "_R", then it is not a Rust symbol.
210 if (strncmp(func
->name
, "_R", 2))
214 * These are just heuristics -- we do not control the precise symbol
215 * name, due to the crate disambiguators (which depend on the compiler)
216 * as well as changes to the source code itself between versions (since
217 * these come from the Rust standard library).
219 return str_ends_with(func
->name
, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
220 str_ends_with(func
->name
, "_4core6option13unwrap_failed") ||
221 str_ends_with(func
->name
, "_4core6result13unwrap_failed") ||
222 str_ends_with(func
->name
, "_4core9panicking5panic") ||
223 str_ends_with(func
->name
, "_4core9panicking9panic_fmt") ||
224 str_ends_with(func
->name
, "_4core9panicking14panic_explicit") ||
225 str_ends_with(func
->name
, "_4core9panicking14panic_nounwind") ||
226 str_ends_with(func
->name
, "_4core9panicking18panic_bounds_check") ||
227 str_ends_with(func
->name
, "_4core9panicking19assert_failed_inner") ||
228 str_ends_with(func
->name
, "_4core9panicking30panic_null_pointer_dereference") ||
229 str_ends_with(func
->name
, "_4core9panicking36panic_misaligned_pointer_dereference") ||
230 str_ends_with(func
->name
, "_7___rustc17rust_begin_unwind") ||
231 strstr(func
->name
, "_4core9panicking13assert_failed") ||
232 strstr(func
->name
, "_4core9panicking11panic_const24panic_const_") ||
233 (strstr(func
->name
, "_4core5slice5index") &&
234 strstr(func
->name
, "slice_") &&
235 str_ends_with(func
->name
, "_fail"));
239 * This checks to see if the given function is a "noreturn" function.
241 * For global functions which are outside the scope of this object file, we
242 * have to keep a manual list of them.
244 * For local functions, we have to detect them manually by simply looking for
245 * the lack of a return instruction.
247 static bool __dead_end_function(struct objtool_file
*file
, struct symbol
*func
,
251 struct instruction
*insn
;
254 #define NORETURN(func) __stringify(func),
255 static const char * const global_noreturns
[] = {
256 #include "noreturns.h"
263 if (func
->bind
== STB_GLOBAL
|| func
->bind
== STB_WEAK
) {
264 if (is_rust_noreturn(func
))
267 for (i
= 0; i
< ARRAY_SIZE(global_noreturns
); i
++)
268 if (!strcmp(func
->name
, global_noreturns
[i
]))
272 if (func
->bind
== STB_WEAK
)
278 insn
= find_insn(file
, func
->sec
, func
->offset
);
279 if (!insn
|| !insn_func(insn
))
282 func_for_each_insn(file
, func
, insn
) {
285 if (insn
->type
== INSN_RETURN
)
293 * A function can have a sibling call instead of a return. In that
294 * case, the function's dead-end status depends on whether the target
295 * of the sibling call returns.
297 func_for_each_insn(file
, func
, insn
) {
298 if (is_sibling_call(insn
)) {
299 struct instruction
*dest
= insn
->jump_dest
;
302 /* sibling call to another file */
305 /* local sibling call */
306 if (recursion
== 5) {
308 * Infinite recursion: two functions have
309 * sibling calls to each other. This is a very
310 * rare case. It means they aren't dead ends.
315 return __dead_end_function(file
, insn_func(dest
), recursion
+1);
/* Public entry point: is @func a noreturn ("dead end") function? */
static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}
327 static void init_cfi_state(struct cfi_state
*cfi
)
331 for (i
= 0; i
< CFI_NUM_REGS
; i
++) {
332 cfi
->regs
[i
].base
= CFI_UNDEFINED
;
333 cfi
->vals
[i
].base
= CFI_UNDEFINED
;
335 cfi
->cfa
.base
= CFI_UNDEFINED
;
336 cfi
->drap_reg
= CFI_UNDEFINED
;
337 cfi
->drap_offset
= -1;
340 static void init_insn_state(struct objtool_file
*file
, struct insn_state
*state
,
343 memset(state
, 0, sizeof(*state
));
344 init_cfi_state(&state
->cfi
);
346 if (opts
.noinstr
&& sec
)
347 state
->noinstr
= sec
->noinstr
;
350 static struct cfi_state
*cfi_alloc(void)
352 struct cfi_state
*cfi
= calloc(1, sizeof(struct cfi_state
));
354 ERROR_GLIBC("calloc");
362 static struct hlist_head
*cfi_hash
;
364 static inline bool cficmp(struct cfi_state
*cfi1
, struct cfi_state
*cfi2
)
366 return memcmp((void *)cfi1
+ sizeof(cfi1
->hash
),
367 (void *)cfi2
+ sizeof(cfi2
->hash
),
368 sizeof(struct cfi_state
) - sizeof(struct hlist_node
));
371 static inline u32
cfi_key(struct cfi_state
*cfi
)
373 return jhash((void *)cfi
+ sizeof(cfi
->hash
),
374 sizeof(*cfi
) - sizeof(cfi
->hash
), 0);
377 static struct cfi_state
*cfi_hash_find_or_add(struct cfi_state
*cfi
)
379 struct hlist_head
*head
= &cfi_hash
[hash_min(cfi_key(cfi
), cfi_bits
)];
380 struct cfi_state
*obj
;
382 hlist_for_each_entry(obj
, head
, hash
) {
383 if (!cficmp(cfi
, obj
)) {
391 hlist_add_head(&obj
->hash
, head
);
396 static void cfi_hash_add(struct cfi_state
*cfi
)
398 struct hlist_head
*head
= &cfi_hash
[hash_min(cfi_key(cfi
), cfi_bits
)];
400 hlist_add_head(&cfi
->hash
, head
);
403 static void *cfi_hash_alloc(unsigned long size
)
405 cfi_bits
= max(10, ilog2(size
));
406 cfi_hash
= mmap(NULL
, sizeof(struct hlist_head
) << cfi_bits
,
407 PROT_READ
|PROT_WRITE
,
408 MAP_PRIVATE
|MAP_ANON
, -1, 0);
409 if (cfi_hash
== (void *)-1L) {
410 ERROR_GLIBC("mmap fail cfi_hash");
412 } else if (opts
.stats
) {
413 printf("cfi_bits: %d\n", cfi_bits
);
419 static unsigned long nr_insns
;
420 static unsigned long nr_insns_visited
;
423 * Call the arch-specific instruction decoder for all the instructions and add
424 * them to the global instruction list.
426 static int decode_instructions(struct objtool_file
*file
)
430 unsigned long offset
;
431 struct instruction
*insn
;
434 for_each_sec(file
, sec
) {
435 struct instruction
*insns
= NULL
;
439 if (!(sec
->sh
.sh_flags
& SHF_EXECINSTR
))
442 if (strcmp(sec
->name
, ".altinstr_replacement") &&
443 strcmp(sec
->name
, ".altinstr_aux") &&
444 strncmp(sec
->name
, ".discard.", 9))
447 if (!strcmp(sec
->name
, ".noinstr.text") ||
448 !strcmp(sec
->name
, ".entry.text") ||
449 !strcmp(sec
->name
, ".cpuidle.text") ||
450 !strncmp(sec
->name
, ".text..__x86.", 13))
454 * .init.text code is ran before userspace and thus doesn't
455 * strictly need retpolines, except for modules which are
456 * loaded late, they very much do need retpoline in their
459 if (!strcmp(sec
->name
, ".init.text") && !opts
.module
)
462 for (offset
= 0; offset
< sec
->sh
.sh_size
; offset
+= insn
->len
) {
463 if (!insns
|| idx
== INSN_CHUNK_MAX
) {
464 insns
= calloc(sizeof(*insn
), INSN_CHUNK_SIZE
);
466 ERROR_GLIBC("calloc");
476 INIT_LIST_HEAD(&insn
->call_node
);
478 insn
->offset
= offset
;
479 insn
->prev_len
= prev_len
;
481 ret
= arch_decode_instruction(file
, sec
, offset
,
482 sec
->sh
.sh_size
- offset
,
487 prev_len
= insn
->len
;
490 * By default, "ud2" is a dead end unless otherwise
491 * annotated, because GCC 7 inserts it for certain
492 * divide-by-zero cases.
494 if (insn
->type
== INSN_BUG
)
495 insn
->dead_end
= true;
497 hash_add(file
->insn_hash
, &insn
->hash
, sec_offset_hash(sec
, insn
->offset
));
501 sec_for_each_sym(sec
, func
) {
502 if (func
->type
!= STT_NOTYPE
&& func
->type
!= STT_FUNC
)
505 if (func
->offset
== sec
->sh
.sh_size
) {
506 /* Heuristic: likely an "end" symbol */
507 if (func
->type
== STT_NOTYPE
)
509 ERROR("%s(): STT_FUNC at end of section", func
->name
);
513 if (func
->embedded_insn
|| func
->alias
!= func
)
516 if (!find_insn(file
, sec
, func
->offset
)) {
517 ERROR("%s(): can't find starting instruction", func
->name
);
521 sym_for_each_insn(file
, func
, insn
) {
523 if (func
->type
== STT_FUNC
&&
524 insn
->type
== INSN_ENDBR
&&
525 list_empty(&insn
->call_node
)) {
526 if (insn
->offset
== func
->offset
) {
527 list_add_tail(&insn
->call_node
, &file
->endbr_list
);
530 file
->nr_endbr_int
++;
538 printf("nr_insns: %lu\n", nr_insns
);
544 * Read the pv_ops[] .data table to find the static initialized values.
546 static int add_pv_ops(struct objtool_file
*file
, const char *symname
)
548 struct symbol
*sym
, *func
;
549 unsigned long off
, end
;
553 sym
= find_symbol_by_name(file
->elf
, symname
);
558 end
= off
+ sym
->len
;
560 reloc
= find_reloc_by_dest_range(file
->elf
, sym
->sec
, off
, end
- off
);
564 idx
= (reloc_offset(reloc
) - sym
->offset
) / sizeof(unsigned long);
567 if (func
->type
== STT_SECTION
)
568 func
= find_symbol_by_offset(reloc
->sym
->sec
,
569 reloc_addend(reloc
));
571 ERROR_FUNC(reloc
->sym
->sec
, reloc_addend(reloc
),
572 "can't find func at %s[%d]", symname
, idx
);
576 if (objtool_pv_add(file
, idx
, func
))
579 off
= reloc_offset(reloc
) + 1;
588 * Allocate and initialize file->pv_ops[].
590 static int init_pv_ops(struct objtool_file
*file
)
592 static const char *pv_ops_tables
[] = {
608 sym
= find_symbol_by_name(file
->elf
, "pv_ops");
612 nr
= sym
->len
/ sizeof(unsigned long);
613 file
->pv_ops
= calloc(sizeof(struct pv_state
), nr
);
615 ERROR_GLIBC("calloc");
619 for (idx
= 0; idx
< nr
; idx
++)
620 INIT_LIST_HEAD(&file
->pv_ops
[idx
].targets
);
622 for (idx
= 0; (pv_ops
= pv_ops_tables
[idx
]); idx
++) {
623 ret
= add_pv_ops(file
, pv_ops
);
631 static int create_static_call_sections(struct objtool_file
*file
)
633 struct static_call_site
*site
;
635 struct instruction
*insn
;
636 struct symbol
*key_sym
;
637 char *key_name
, *tmp
;
640 sec
= find_section_by_name(file
->elf
, ".static_call_sites");
642 INIT_LIST_HEAD(&file
->static_call_list
);
643 WARN("file already has .static_call_sites section, skipping");
647 if (list_empty(&file
->static_call_list
))
651 list_for_each_entry(insn
, &file
->static_call_list
, call_node
)
654 sec
= elf_create_section_pair(file
->elf
, ".static_call_sites",
655 sizeof(*site
), idx
, idx
* 2);
659 /* Allow modules to modify the low bits of static_call_site::key */
660 sec
->sh
.sh_flags
|= SHF_WRITE
;
663 list_for_each_entry(insn
, &file
->static_call_list
, call_node
) {
665 /* populate reloc for 'addr' */
666 if (!elf_init_reloc_text_sym(file
->elf
, sec
,
667 idx
* sizeof(*site
), idx
* 2,
668 insn
->sec
, insn
->offset
))
671 /* find key symbol */
672 key_name
= strdup(insn_call_dest(insn
)->name
);
674 ERROR_GLIBC("strdup");
677 if (strncmp(key_name
, STATIC_CALL_TRAMP_PREFIX_STR
,
678 STATIC_CALL_TRAMP_PREFIX_LEN
)) {
679 ERROR("static_call: trampoline name malformed: %s", key_name
);
682 tmp
= key_name
+ STATIC_CALL_TRAMP_PREFIX_LEN
- STATIC_CALL_KEY_PREFIX_LEN
;
683 memcpy(tmp
, STATIC_CALL_KEY_PREFIX_STR
, STATIC_CALL_KEY_PREFIX_LEN
);
685 key_sym
= find_symbol_by_name(file
->elf
, tmp
);
688 ERROR("static_call: can't find static_call_key symbol: %s", tmp
);
693 * For modules(), the key might not be exported, which
694 * means the module can make static calls but isn't
695 * allowed to change them.
697 * In that case we temporarily set the key to be the
698 * trampoline address. This is fixed up in
699 * static_call_add_module().
701 key_sym
= insn_call_dest(insn
);
704 /* populate reloc for 'key' */
705 if (!elf_init_reloc_data_sym(file
->elf
, sec
,
706 idx
* sizeof(*site
) + 4,
707 (idx
* 2) + 1, key_sym
,
708 is_sibling_call(insn
) * STATIC_CALL_SITE_TAIL
))
717 static int create_retpoline_sites_sections(struct objtool_file
*file
)
719 struct instruction
*insn
;
723 sec
= find_section_by_name(file
->elf
, ".retpoline_sites");
725 WARN("file already has .retpoline_sites, skipping");
730 list_for_each_entry(insn
, &file
->retpoline_call_list
, call_node
)
736 sec
= elf_create_section_pair(file
->elf
, ".retpoline_sites",
737 sizeof(int), idx
, idx
);
742 list_for_each_entry(insn
, &file
->retpoline_call_list
, call_node
) {
744 if (!elf_init_reloc_text_sym(file
->elf
, sec
,
745 idx
* sizeof(int), idx
,
746 insn
->sec
, insn
->offset
))
755 static int create_return_sites_sections(struct objtool_file
*file
)
757 struct instruction
*insn
;
761 sec
= find_section_by_name(file
->elf
, ".return_sites");
763 WARN("file already has .return_sites, skipping");
768 list_for_each_entry(insn
, &file
->return_thunk_list
, call_node
)
774 sec
= elf_create_section_pair(file
->elf
, ".return_sites",
775 sizeof(int), idx
, idx
);
780 list_for_each_entry(insn
, &file
->return_thunk_list
, call_node
) {
782 if (!elf_init_reloc_text_sym(file
->elf
, sec
,
783 idx
* sizeof(int), idx
,
784 insn
->sec
, insn
->offset
))
793 static int create_ibt_endbr_seal_sections(struct objtool_file
*file
)
795 struct instruction
*insn
;
799 sec
= find_section_by_name(file
->elf
, ".ibt_endbr_seal");
801 WARN("file already has .ibt_endbr_seal, skipping");
806 list_for_each_entry(insn
, &file
->endbr_list
, call_node
)
810 printf("ibt: ENDBR at function start: %d\n", file
->nr_endbr
);
811 printf("ibt: ENDBR inside functions: %d\n", file
->nr_endbr_int
);
812 printf("ibt: superfluous ENDBR: %d\n", idx
);
818 sec
= elf_create_section_pair(file
->elf
, ".ibt_endbr_seal",
819 sizeof(int), idx
, idx
);
824 list_for_each_entry(insn
, &file
->endbr_list
, call_node
) {
826 int *site
= (int *)sec
->data
->d_buf
+ idx
;
827 struct symbol
*sym
= insn
->sym
;
830 if (opts
.module
&& sym
&& sym
->type
== STT_FUNC
&&
831 insn
->offset
== sym
->offset
&&
832 (!strcmp(sym
->name
, "init_module") ||
833 !strcmp(sym
->name
, "cleanup_module"))) {
834 ERROR("%s(): Magic init_module() function name is deprecated, use module_init(fn) instead",
839 if (!elf_init_reloc_text_sym(file
->elf
, sec
,
840 idx
* sizeof(int), idx
,
841 insn
->sec
, insn
->offset
))
850 static int create_cfi_sections(struct objtool_file
*file
)
856 sec
= find_section_by_name(file
->elf
, ".cfi_sites");
858 INIT_LIST_HEAD(&file
->call_list
);
859 WARN("file already has .cfi_sites section, skipping");
864 for_each_sym(file
, sym
) {
865 if (sym
->type
!= STT_FUNC
)
868 if (strncmp(sym
->name
, "__cfi_", 6))
874 sec
= elf_create_section_pair(file
->elf
, ".cfi_sites",
875 sizeof(unsigned int), idx
, idx
);
880 for_each_sym(file
, sym
) {
881 if (sym
->type
!= STT_FUNC
)
884 if (strncmp(sym
->name
, "__cfi_", 6))
887 if (!elf_init_reloc_text_sym(file
->elf
, sec
,
888 idx
* sizeof(unsigned int), idx
,
889 sym
->sec
, sym
->offset
))
898 static int create_mcount_loc_sections(struct objtool_file
*file
)
900 size_t addr_size
= elf_addr_size(file
->elf
);
901 struct instruction
*insn
;
905 sec
= find_section_by_name(file
->elf
, "__mcount_loc");
907 INIT_LIST_HEAD(&file
->mcount_loc_list
);
908 WARN("file already has __mcount_loc section, skipping");
912 if (list_empty(&file
->mcount_loc_list
))
916 list_for_each_entry(insn
, &file
->mcount_loc_list
, call_node
)
919 sec
= elf_create_section_pair(file
->elf
, "__mcount_loc", addr_size
,
924 sec
->sh
.sh_addralign
= addr_size
;
927 list_for_each_entry(insn
, &file
->mcount_loc_list
, call_node
) {
931 reloc
= elf_init_reloc_text_sym(file
->elf
, sec
, idx
* addr_size
, idx
,
932 insn
->sec
, insn
->offset
);
936 set_reloc_type(file
->elf
, reloc
, addr_size
== 8 ? R_ABS64
: R_ABS32
);
944 static int create_direct_call_sections(struct objtool_file
*file
)
946 struct instruction
*insn
;
950 sec
= find_section_by_name(file
->elf
, ".call_sites");
952 INIT_LIST_HEAD(&file
->call_list
);
953 WARN("file already has .call_sites section, skipping");
957 if (list_empty(&file
->call_list
))
961 list_for_each_entry(insn
, &file
->call_list
, call_node
)
964 sec
= elf_create_section_pair(file
->elf
, ".call_sites",
965 sizeof(unsigned int), idx
, idx
);
970 list_for_each_entry(insn
, &file
->call_list
, call_node
) {
972 if (!elf_init_reloc_text_sym(file
->elf
, sec
,
973 idx
* sizeof(unsigned int), idx
,
974 insn
->sec
, insn
->offset
))
984 * Warnings shouldn't be reported for ignored functions.
986 static int add_ignores(struct objtool_file
*file
)
988 struct section
*rsec
;
992 rsec
= find_section_by_name(file
->elf
, ".rela.discard.func_stack_frame_non_standard");
996 for_each_reloc(rsec
, reloc
) {
997 switch (reloc
->sym
->type
) {
1003 func
= find_func_by_offset(reloc
->sym
->sec
, reloc_addend(reloc
));
1009 ERROR("unexpected relocation symbol type in %s: %d",
1010 rsec
->name
, reloc
->sym
->type
);
1014 func
->ignore
= true;
1016 func
->cfunc
->ignore
= true;
1023 * This is a whitelist of functions that is allowed to be called with AC set.
1024 * The list is meant to be minimal and only contains compiler instrumentation
1025 * ABI and a few functions used to implement *_{to,from}_user() functions.
1027 * These functions must not directly change AC, but may PUSHF/POPF.
1029 static const char *uaccess_safe_builtin
[] = {
1032 "kasan_check_range",
1033 /* KASAN out-of-line */
1034 "__asan_loadN_noabort",
1035 "__asan_load1_noabort",
1036 "__asan_load2_noabort",
1037 "__asan_load4_noabort",
1038 "__asan_load8_noabort",
1039 "__asan_load16_noabort",
1040 "__asan_storeN_noabort",
1041 "__asan_store1_noabort",
1042 "__asan_store2_noabort",
1043 "__asan_store4_noabort",
1044 "__asan_store8_noabort",
1045 "__asan_store16_noabort",
1046 "__kasan_check_read",
1047 "__kasan_check_write",
1049 "__asan_report_load_n_noabort",
1050 "__asan_report_load1_noabort",
1051 "__asan_report_load2_noabort",
1052 "__asan_report_load4_noabort",
1053 "__asan_report_load8_noabort",
1054 "__asan_report_load16_noabort",
1055 "__asan_report_store_n_noabort",
1056 "__asan_report_store1_noabort",
1057 "__asan_report_store2_noabort",
1058 "__asan_report_store4_noabort",
1059 "__asan_report_store8_noabort",
1060 "__asan_report_store16_noabort",
1062 "__kcsan_check_access",
1067 "kcsan_found_watchpoint",
1068 "kcsan_setup_watchpoint",
1069 "kcsan_check_scoped_accesses",
1070 "kcsan_disable_current",
1071 "kcsan_enable_current_nowarn",
1073 "__tsan_func_entry",
1075 "__tsan_read_range",
1076 "__tsan_write_range",
1087 "__tsan_read_write1",
1088 "__tsan_read_write2",
1089 "__tsan_read_write4",
1090 "__tsan_read_write8",
1091 "__tsan_read_write16",
1092 "__tsan_volatile_read1",
1093 "__tsan_volatile_read2",
1094 "__tsan_volatile_read4",
1095 "__tsan_volatile_read8",
1096 "__tsan_volatile_read16",
1097 "__tsan_volatile_write1",
1098 "__tsan_volatile_write2",
1099 "__tsan_volatile_write4",
1100 "__tsan_volatile_write8",
1101 "__tsan_volatile_write16",
1102 "__tsan_atomic8_load",
1103 "__tsan_atomic16_load",
1104 "__tsan_atomic32_load",
1105 "__tsan_atomic64_load",
1106 "__tsan_atomic8_store",
1107 "__tsan_atomic16_store",
1108 "__tsan_atomic32_store",
1109 "__tsan_atomic64_store",
1110 "__tsan_atomic8_exchange",
1111 "__tsan_atomic16_exchange",
1112 "__tsan_atomic32_exchange",
1113 "__tsan_atomic64_exchange",
1114 "__tsan_atomic8_fetch_add",
1115 "__tsan_atomic16_fetch_add",
1116 "__tsan_atomic32_fetch_add",
1117 "__tsan_atomic64_fetch_add",
1118 "__tsan_atomic8_fetch_sub",
1119 "__tsan_atomic16_fetch_sub",
1120 "__tsan_atomic32_fetch_sub",
1121 "__tsan_atomic64_fetch_sub",
1122 "__tsan_atomic8_fetch_and",
1123 "__tsan_atomic16_fetch_and",
1124 "__tsan_atomic32_fetch_and",
1125 "__tsan_atomic64_fetch_and",
1126 "__tsan_atomic8_fetch_or",
1127 "__tsan_atomic16_fetch_or",
1128 "__tsan_atomic32_fetch_or",
1129 "__tsan_atomic64_fetch_or",
1130 "__tsan_atomic8_fetch_xor",
1131 "__tsan_atomic16_fetch_xor",
1132 "__tsan_atomic32_fetch_xor",
1133 "__tsan_atomic64_fetch_xor",
1134 "__tsan_atomic8_fetch_nand",
1135 "__tsan_atomic16_fetch_nand",
1136 "__tsan_atomic32_fetch_nand",
1137 "__tsan_atomic64_fetch_nand",
1138 "__tsan_atomic8_compare_exchange_strong",
1139 "__tsan_atomic16_compare_exchange_strong",
1140 "__tsan_atomic32_compare_exchange_strong",
1141 "__tsan_atomic64_compare_exchange_strong",
1142 "__tsan_atomic8_compare_exchange_weak",
1143 "__tsan_atomic16_compare_exchange_weak",
1144 "__tsan_atomic32_compare_exchange_weak",
1145 "__tsan_atomic64_compare_exchange_weak",
1146 "__tsan_atomic8_compare_exchange_val",
1147 "__tsan_atomic16_compare_exchange_val",
1148 "__tsan_atomic32_compare_exchange_val",
1149 "__tsan_atomic64_compare_exchange_val",
1150 "__tsan_atomic_thread_fence",
1151 "__tsan_atomic_signal_fence",
1152 "__tsan_unaligned_read16",
1153 "__tsan_unaligned_write16",
1157 "__sanitizer_cov_trace_pc",
1158 "__sanitizer_cov_trace_const_cmp1",
1159 "__sanitizer_cov_trace_const_cmp2",
1160 "__sanitizer_cov_trace_const_cmp4",
1161 "__sanitizer_cov_trace_const_cmp8",
1162 "__sanitizer_cov_trace_cmp1",
1163 "__sanitizer_cov_trace_cmp2",
1164 "__sanitizer_cov_trace_cmp4",
1165 "__sanitizer_cov_trace_cmp8",
1166 "__sanitizer_cov_trace_switch",
1168 "kmsan_copy_to_user",
1169 "kmsan_disable_current",
1170 "kmsan_enable_current",
1172 "kmsan_unpoison_entry_regs",
1173 "kmsan_unpoison_memory",
1174 "__msan_chain_origin",
1175 "__msan_get_context_state",
1176 "__msan_instrument_asm_store",
1177 "__msan_metadata_ptr_for_load_1",
1178 "__msan_metadata_ptr_for_load_2",
1179 "__msan_metadata_ptr_for_load_4",
1180 "__msan_metadata_ptr_for_load_8",
1181 "__msan_metadata_ptr_for_load_n",
1182 "__msan_metadata_ptr_for_store_1",
1183 "__msan_metadata_ptr_for_store_2",
1184 "__msan_metadata_ptr_for_store_4",
1185 "__msan_metadata_ptr_for_store_8",
1186 "__msan_metadata_ptr_for_store_n",
1187 "__msan_poison_alloca",
1190 "ubsan_type_mismatch_common",
1191 "__ubsan_handle_type_mismatch",
1192 "__ubsan_handle_type_mismatch_v1",
1193 "__ubsan_handle_shift_out_of_bounds",
1194 "__ubsan_handle_load_invalid_value",
1196 "stackleak_track_stack",
1197 /* TRACE_BRANCH_PROFILING */
1198 "ftrace_likely_update",
1199 /* STACKPROTECTOR */
1202 "csum_partial_copy_generic",
1204 "copy_mc_fragile_handle_tail",
1205 "copy_mc_enhanced_fast_string",
1206 "rep_stos_alternative",
1207 "rep_movs_alternative",
1208 "__copy_user_nocache",
1212 static void add_uaccess_safe(struct objtool_file
*file
)
1214 struct symbol
*func
;
1220 for (name
= uaccess_safe_builtin
; *name
; name
++) {
1221 func
= find_symbol_by_name(file
->elf
, *name
);
1225 func
->uaccess_safe
= true;
1230 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
1231 * will be added to the .retpoline_sites section.
1233 __weak
bool arch_is_retpoline(struct symbol
*sym
)
1239 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
1240 * will be added to the .return_sites section.
1242 __weak
bool arch_is_rethunk(struct symbol
*sym
)
/*
 * Symbols that are embedded inside other instructions, because sometimes crazy
 * code exists. These are mostly ignored for validation purposes.
 *
 * Weak default: no embedded instructions; arch code overrides this.
 */
__weak bool arch_is_embedded_insn(struct symbol *sym)
{
	return false;
}
/*
 * Find the relocation (if any) covering @insn's bytes.  Caches a negative
 * result in insn->no_reloc so repeated lookups for reloc-less instructions
 * are cheap.
 */
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	/* Negative-cache hit: we already know there's no reloc here. */
	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}
/*
 * Free @insn's stack_op list and clear the head pointer.  Used when an
 * instruction's stack effects should be ignored (e.g. CALLs, whose stack
 * impact is undone by the callee's RETURN).
 */
static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		/* Save the link before freeing the node. */
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}
/*
 * Classify a call site and enqueue it on the appropriate per-file list
 * (static call, retpoline, mcount, plain call), optionally rewriting the
 * instruction bytes in the object for noinstr/mcount hacks.
 *
 * @sibling: true for a tail-call (JMP used as a call).
 * Returns 0 on success, -1 on ELF write failure.
 */
static int annotate_call_site(struct objtool_file *file,
			      struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/* No resolved dest yet: fall back to the relocation's symbol. */
	if (!sym)
		sym = reloc->sym;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return 0;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return 0;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		/* Patch the call out: RET for tail-calls, NOP otherwise. */
		if (elf_write_insn(file->elf, insn->sec,
				   insn->offset, insn->len,
				   sibling ? arch_ret_insn(insn->len)
					   : arch_nop_insn(insn->len))) {
			return -1;
		}

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return 0;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			if (reloc)
				set_reloc_type(file->elf, reloc, R_NONE);

			if (elf_write_insn(file->elf, insn->sec,
					   insn->offset, insn->len,
					   arch_nop_insn(insn->len))) {
				return -1;
			}

			insn->type = INSN_NOP;
		}

		/* Record the site for the .mcount_loc table. */
		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return 0;
	}

	/* Plain direct call; skip .init text and embedded-insn targets. */
	if (insn->type == INSN_CALL && !insn->sec->init &&
	    !insn->_call_dest->embedded_insn)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;

	return 0;
}
/*
 * Record @dest as the call destination of @insn and annotate the call site.
 * A NULL @dest just clears the destination and is not an error here.
 */
static int add_call_dest(struct objtool_file *file, struct instruction *insn,
			 struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return 0;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, sibling);
}
/*
 * Convert a call/jump to a retpoline thunk into the equivalent dynamic
 * call/jump and mark it retpoline-safe.
 */
static int add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		/* Not a call/jump; nothing to convert. */
		return 0;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	return annotate_call_site(file, insn, false);
}
/*
 * Convert a tail-call to a return thunk into a plain INSN_RETURN.
 * @add: when true, also record the site on the return_thunk_list so it
 * lands in .return_sites.
 */
static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}
/*
 * Is @insn the (effective) first instruction of @sym?  With IBT, a direct
 * CALL/JMP may legitimately land just past a leading ENDBR, so that case
 * also counts as "first".
 */
static bool is_first_func_insn(struct objtool_file *file,
			       struct instruction *insn, struct symbol *sym)
{
	if (insn->offset == sym->offset)
		return true;

	/* Allow direct CALL/JMP past ENDBR */
	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == sym->offset + prev->len)
			return true;
	}

	return false;
}
/*
 * A sibling call is a tail-call to another symbol -- to differentiate from a
 * recursive tail-call which is to the same symbol.
 */
static bool jump_is_sibling_call(struct objtool_file *file,
				 struct instruction *from, struct instruction *to)
{
	struct symbol *fs = from->sym;
	struct symbol *ts = to->sym;

	/* Not a sibling call if from/to a symbol hole */
	if (!fs || !ts)
		return false;

	/* Not a sibling call if not targeting the start of a symbol. */
	if (!is_first_func_insn(file, to, ts))
		return false;

	/* Disallow sibling calls into STT_NOTYPE */
	if (ts->type == STT_NOTYPE)
		return false;

	/* Must not be self to be a sibling */
	return fs->pfunc != ts->pfunc;
}
/*
 * Find the destination instructions for all jumps.
 *
 * For each static jump: resolve the target from the instruction's reloc (or
 * its immediate when there is none), converting retpoline/return-thunk
 * targets and sibling calls along the way, and finally link insn->jump_dest.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;
	int ret;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);

		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* No reloc: target comes from the immediate. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
		} else if (reloc->sym->retpoline_thunk) {
			ret = add_retpoline_call(file, insn);
			if (ret)
				return ret;
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (func) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			ret = add_call_dest(file, insn, reloc->sym, true);
			if (ret)
				return ret;
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc_addend(reloc));
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for retbleed_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction. Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->embedded_insn) {
				add_return_call(file, insn, false);
				continue;
			}

			/*
			 * GCOV/KCOV dead code can jump to the end of the
			 * function, past the last real instruction.
			 */
			if (file->ignore_unreachables && func &&
			    dest_sec == insn->sec &&
			    dest_off == func->offset + func->len)
				continue;

			ERROR_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
				   dest_sec->name, dest_off);
			return -1;
		}

		/*
		 * An intra-TU jump in retpoline.o might not have a relocation
		 * for its jump dest, in which case the above
		 * add_{retpoline,return}_call() didn't happen.
		 */
		if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
			if (jump_dest->sym->retpoline_thunk) {
				ret = add_retpoline_call(file, insn);
				if (ret)
					return ret;
				continue;
			}
			if (jump_dest->sym->return_thunk) {
				add_return_call(file, insn, true);
				continue;
			}
		}

		/*
		 * Cross-function jump.
		 */
		if (func && insn_func(jump_dest) && func != insn_func(jump_dest)) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(func->name, ".cold") &&
			    strstr(insn_func(jump_dest)->name, ".cold")) {
				func->cfunc = insn_func(jump_dest);
				insn_func(jump_dest)->pfunc = func;
			}
		}

		if (jump_is_sibling_call(file, insn, jump_dest)) {
			/*
			 * Internal sibling call without reloc or with
			 * STT_SECTION reloc.
			 */
			ret = add_call_dest(file, insn, insn_func(jump_dest), true);
			if (ret)
				return ret;
			continue;
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}
/*
 * Resolve the symbol at @offset in @sec that a call targets: prefer an
 * actual STT_FUNC at that offset, otherwise fall back to any symbol
 * containing the offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	if (sym)
		return sym;

	return find_symbol_by_offset(sec, offset);
}
/*
 * Find the destination instructions for all calls.
 *
 * Direct calls resolve via the immediate (no reloc) or the relocation's
 * symbol/section; retpoline thunk calls are converted to dynamic calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;
	int ret;

	for_each_insn(file, insn) {
		struct symbol *func = insn_func(insn);

		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Target encoded in the instruction itself. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			ret = add_call_dest(file, insn, dest, false);
			if (ret)
				return ret;

			if (func && func->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				ERROR_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (func && insn_call_dest(insn)->type != STT_FUNC) {
				ERROR_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc_addend(reloc));
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				ERROR_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					   reloc->sym->sec->name, dest_off);
				return -1;
			}

			ret = add_call_dest(file, insn, dest, false);
			if (ret)
				return ret;

		} else if (reloc->sym->retpoline_thunk) {
			ret = add_retpoline_call(file, insn);
			if (ret)
				return ret;

		} else {
			/* Ordinary call with a symbol reloc. */
			ret = add_call_dest(file, insn, reloc->sym, false);
			if (ret)
				return ret;
		}
	}

	return 0;
}
1705 * The .alternatives section requires some extra special care over and above
1706 * other special sections because alternatives are patched in place.
1708 static int handle_group_alt(struct objtool_file
*file
,
1709 struct special_alt
*special_alt
,
1710 struct instruction
*orig_insn
,
1711 struct instruction
**new_insn
)
1713 struct instruction
*last_new_insn
= NULL
, *insn
, *nop
= NULL
;
1714 struct alt_group
*orig_alt_group
, *new_alt_group
;
1715 unsigned long dest_off
;
1717 orig_alt_group
= orig_insn
->alt_group
;
1718 if (!orig_alt_group
) {
1719 struct instruction
*last_orig_insn
= NULL
;
1721 orig_alt_group
= calloc(1, sizeof(*orig_alt_group
));
1722 if (!orig_alt_group
) {
1723 ERROR_GLIBC("calloc");
1726 orig_alt_group
->cfi
= calloc(special_alt
->orig_len
,
1727 sizeof(struct cfi_state
*));
1728 if (!orig_alt_group
->cfi
) {
1729 ERROR_GLIBC("calloc");
1734 sec_for_each_insn_from(file
, insn
) {
1735 if (insn
->offset
>= special_alt
->orig_off
+ special_alt
->orig_len
)
1738 insn
->alt_group
= orig_alt_group
;
1739 last_orig_insn
= insn
;
1741 orig_alt_group
->orig_group
= NULL
;
1742 orig_alt_group
->first_insn
= orig_insn
;
1743 orig_alt_group
->last_insn
= last_orig_insn
;
1744 orig_alt_group
->nop
= NULL
;
1745 orig_alt_group
->ignore
= orig_insn
->ignore_alts
;
1747 if (orig_alt_group
->last_insn
->offset
+ orig_alt_group
->last_insn
->len
-
1748 orig_alt_group
->first_insn
->offset
!= special_alt
->orig_len
) {
1749 ERROR_INSN(orig_insn
, "weirdly overlapping alternative! %ld != %d",
1750 orig_alt_group
->last_insn
->offset
+
1751 orig_alt_group
->last_insn
->len
-
1752 orig_alt_group
->first_insn
->offset
,
1753 special_alt
->orig_len
);
1758 new_alt_group
= calloc(1, sizeof(*new_alt_group
));
1759 if (!new_alt_group
) {
1760 ERROR_GLIBC("calloc");
1764 if (special_alt
->new_len
< special_alt
->orig_len
) {
1766 * Insert a fake nop at the end to make the replacement
1767 * alt_group the same size as the original. This is needed to
1768 * allow propagate_alt_cfi() to do its magic. When the last
1769 * instruction affects the stack, the instruction after it (the
1770 * nop) will propagate the new state to the shared CFI array.
1772 nop
= calloc(1, sizeof(*nop
));
1774 ERROR_GLIBC("calloc");
1777 memset(nop
, 0, sizeof(*nop
));
1779 nop
->sec
= special_alt
->new_sec
;
1780 nop
->offset
= special_alt
->new_off
+ special_alt
->new_len
;
1781 nop
->len
= special_alt
->orig_len
- special_alt
->new_len
;
1782 nop
->type
= INSN_NOP
;
1783 nop
->sym
= orig_insn
->sym
;
1784 nop
->alt_group
= new_alt_group
;
1787 if (!special_alt
->new_len
) {
1793 sec_for_each_insn_from(file
, insn
) {
1794 struct reloc
*alt_reloc
;
1796 if (insn
->offset
>= special_alt
->new_off
+ special_alt
->new_len
)
1799 last_new_insn
= insn
;
1801 insn
->sym
= orig_insn
->sym
;
1802 insn
->alt_group
= new_alt_group
;
1805 * Since alternative replacement code is copy/pasted by the
1806 * kernel after applying relocations, generally such code can't
1807 * have relative-address relocation references to outside the
1808 * .altinstr_replacement section, unless the arch's
1809 * alternatives code can adjust the relative offsets
1812 alt_reloc
= insn_reloc(file
, insn
);
1813 if (alt_reloc
&& arch_pc_relative_reloc(alt_reloc
) &&
1814 !arch_support_alt_relocation(special_alt
, insn
, alt_reloc
)) {
1816 ERROR_INSN(insn
, "unsupported relocation in alternatives section");
1820 if (!is_static_jump(insn
))
1823 if (!insn
->immediate
)
1826 dest_off
= arch_jump_destination(insn
);
1827 if (dest_off
== special_alt
->new_off
+ special_alt
->new_len
) {
1828 insn
->jump_dest
= next_insn_same_sec(file
, orig_alt_group
->last_insn
);
1829 if (!insn
->jump_dest
) {
1830 ERROR_INSN(insn
, "can't find alternative jump destination");
1836 if (!last_new_insn
) {
1837 ERROR_FUNC(special_alt
->new_sec
, special_alt
->new_off
,
1838 "can't find last new alternative instruction");
1843 new_alt_group
->orig_group
= orig_alt_group
;
1844 new_alt_group
->first_insn
= *new_insn
;
1845 new_alt_group
->last_insn
= last_new_insn
;
1846 new_alt_group
->nop
= nop
;
1847 new_alt_group
->ignore
= (*new_insn
)->ignore_alts
;
1848 new_alt_group
->cfi
= orig_alt_group
->cfi
;
/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		ERROR_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	/* --hack-jump-label: convert the default-disabled JMP into a NOP. */
	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc)
			set_reloc_type(file->elf, reloc, R_NONE);

		if (elf_write_insn(file->elf, orig_insn->sec,
				   orig_insn->offset, orig_insn->len,
				   arch_nop_insn(orig_insn->len))) {
			return -1;
		}

		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		/* Stats: count short (2-byte) vs long nops. */
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	/* Stats: count short vs long jumps. */
	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/* JMP original: the "alternative" is simply to fall through past it. */
	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	if (special_get_alts(file->elf, &special_alts))
		return -1;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			ERROR_FUNC(special_alt->orig_sec, special_alt->orig_off,
				   "special: can't find orig instruction");
			return -1;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				ERROR_FUNC(special_alt->new_sec, special_alt->new_off,
					   "special: can't find new instruction");
				return -1;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				ERROR_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				return ret;

		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				return ret;
		}

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		/* Push onto the instruction's singly-linked alts list. */
		alt->insn = new_insn;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

	return 0;
}
/*
 * Default translation of a jump-table entry reloc to the target's offset
 * within its symbol's section; arch code may override.
 */
__weak unsigned long arch_jump_table_sym_offset(struct reloc *reloc, struct reloc *table)
{
	return reloc->sym->offset + reloc_addend(reloc);
}
/*
 * Walk the relocations of @insn's jump table and add each valid target as an
 * alternative branch destination on insn->alts.  Stops at the table end,
 * the start of the next table, non-consecutive entries, or targets outside
 * the function.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn)
{
	unsigned long table_size = insn_jump_table_size(insn);
	struct symbol *pfunc = insn_func(insn)->pfunc;
	struct reloc *table = insn_jump_table(insn);
	struct instruction *dest_insn;
	unsigned int prev_offset = 0;
	struct reloc *reloc = table;
	struct alternative *alt;
	unsigned long sym_offset;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (table_size && reloc_offset(reloc) - reloc_offset(table) >= table_size)
			break;
		if (reloc != table && is_jump_table(reloc))
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + arch_reloc_size(reloc))
			break;

		sym_offset = arch_jump_table_sym_offset(reloc, table);

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec && sym_offset == pfunc->offset)
			break;

		/*
		 * Clang sometimes leaves dangling unused jump table entries
		 * which point to the end of the function. Ignore them.
		 */
		if (reloc->sym->sec == pfunc->sec &&
		    sym_offset == pfunc->offset + pfunc->len)
			goto next;

		dest_insn = find_insn(file, reloc->sym->sec, sym_offset);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = calloc(1, sizeof(*alt));
		if (!alt) {
			ERROR_GLIBC("calloc");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
next:
		prev_offset = reloc_offset(reloc);
	}

	/* No entries consumed at all: the table was bogus. */
	if (!prev_offset) {
		ERROR_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static void find_jump_table(struct objtool_file *file, struct symbol *func,
			    struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;
	unsigned long table_size;
	unsigned long sym_offset;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* Another dynamic jump means we've gone past our switch. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		table_reloc = arch_find_switch_table(file, insn, &table_size);
		if (!table_reloc)
			continue;

		sym_offset = table_reloc->sym->offset + reloc_addend(table_reloc);

		/* The first entry must land back inside this function. */
		dest_insn = find_insn(file, table_reloc->sym->sec, sym_offset);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		set_jump_table(table_reloc);
		orig_insn->_jump_table = table_reloc;
		orig_insn->_jump_table_size = table_size;

		break;
	}
}
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		find_jump_table(file, func, insn);
	}
}
/*
 * Second pass: for each dynamic jump in @func that was matched to a jump
 * table by mark_func_jump_tables(), add the table's targets as alternatives.
 */
static int add_func_jump_tables(struct objtool_file *file,
				struct symbol *func)
{
	struct instruction *insn;
	int ret;

	func_for_each_insn(file, func, insn) {
		if (!insn_jump_table(insn))
			continue;

		ret = add_jump_table(file, insn);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * For some switch statements, gcc generates a jump table in the .rodata
 * section which contains a list of addresses within the function to jump to.
 * This finds these jump tables and adds them to the insn->alts lists.
 */
static int add_jump_table_alts(struct objtool_file *file)
{
	struct symbol *func;
	int ret;

	/* No rodata sections means no jump tables to find. */
	if (!file->rodata)
		return 0;

	for_each_sym(file, func) {
		if (func->type != STT_FUNC)
			continue;

		mark_func_jump_tables(file, func);
		ret = add_func_jump_tables(file, func);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Initialize @state to the standard CFI state at function entry, as captured
 * in initial_func_cfi by the arch code.
 */
static void set_func_state(struct cfi_state *state)
{
	state->cfa = initial_func_cfi.cfa;
	memcpy(&state->regs, &initial_func_cfi.regs,
	       CFI_NUM_REGS * sizeof(struct cfi_reg));
	/* At entry the whole stack usage is the CFA offset (return address). */
	state->stack_size = initial_func_cfi.cfa.offset;
	state->type = UNWIND_HINT_TYPE_CALL;
}
/*
 * Parse the .discard.unwind_hints section and attach the corresponding CFI
 * state (or save/restore markers) to each hinted instruction.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		ERROR("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		ERROR("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			ERROR("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		/* Hint target: section+addend, or a local label symbol. */
		if (reloc->sym->type == STT_SECTION) {
			offset = reloc_addend(reloc);
		} else if (reloc->sym->local_label) {
			offset = reloc->sym->offset;
		} else {
			ERROR("unexpected relocation symbol type in %s", sec->rsec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			if (sym && sym->bind == STB_GLOBAL) {
				/* Global entry points need ENDBR under IBT. */
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					ERROR_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
					return -1;
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from any CFI already attached to this insn. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			ERROR_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
/*
 * Iterate the .discard.annotate_insn section and invoke @func for each
 * (instruction, annotation-type) pair.  Entries are 8 bytes: a 4-byte
 * location reloc followed by a 4-byte type.
 */
static int read_annotate(struct objtool_file *file,
			 int (*func)(struct objtool_file *file, int type, struct instruction *insn))
{
	struct section *sec;
	struct instruction *insn;
	struct reloc *reloc;
	unsigned long offset;
	int type, ret;

	sec = find_section_by_name(file->elf, ".discard.annotate_insn");
	if (!sec)
		return 0;

	if (!sec->rsec)
		return 0;

	if (sec->sh.sh_entsize != 8) {
		static bool warned = false;

		/* Some linkers emit a wrong entsize; warn once and fix up. */
		if (!warned && opts.verbose) {
			WARN("%s: dodgy linker, sh_entsize != 8", sec->name);
			warned = true;
		}
		sec->sh.sh_entsize = 8;
	}

	for_each_reloc(sec->rsec, reloc) {
		/* The annotation type lives 4 bytes past the location word. */
		type = *(u32 *)(sec->data->d_buf + (reloc_idx(reloc) * sec->sh.sh_entsize) + 4);

		offset = reloc->sym->offset + reloc_addend(reloc);
		insn = find_insn(file, reloc->sym->sec, offset);
		if (!insn) {
			ERROR("bad .discard.annotate_insn entry: %d of type %d", reloc_idx(reloc), type);
			return -1;
		}

		ret = func(file, type, insn);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
 * Early annotation pass: handle the annotation types that must be processed
 * before other decode passes run.  Other types are deferred to later passes.
 */
static int __annotate_early(struct objtool_file *file, int type, struct instruction *insn)
{
	switch (type) {

	/* Must be before add_special_section_alts() */
	case ANNOTYPE_IGNORE_ALTS:
		insn->ignore_alts = true;
		break;

	/*
	 * Must be before read_unwind_hints() since that needs insn->noendbr.
	 */
	case ANNOTYPE_NOENDBR:
		insn->noendbr = 1;
		break;

	default:
		break;
	}

	return 0;
}
/*
 * Intra-function-call annotation pass: convert annotated direct CALLs into
 * unconditional JMPs (keeping their stack_ops) and resolve the jump dest.
 */
static int __annotate_ifc(struct objtool_file *file, int type, struct instruction *insn)
{
	unsigned long dest_off;

	if (type != ANNOTYPE_INTRA_FUNCTION_CALL)
		return 0;

	if (insn->type != INSN_CALL) {
		ERROR_INSN(insn, "intra_function_call not a direct call");
		return -1;
	}

	/*
	 * Treat intra-function CALLs as JMPs, but with a stack_op.
	 * See add_call_destinations(), which strips stack_ops from
	 * normal CALLs.
	 */
	insn->type = INSN_JUMP_UNCONDITIONAL;

	dest_off = arch_jump_destination(insn);
	insn->jump_dest = find_insn(file, insn->sec, dest_off);
	if (!insn->jump_dest) {
		ERROR_INSN(insn, "can't find call dest at %s+0x%lx",
			   insn->sec->name, dest_off);
		return -1;
	}

	return 0;
}
/*
 * Late annotation pass: applied after call/jump destinations are resolved so
 * it can validate retpoline-safe hints and override dead-end marks.
 */
static int __annotate_late(struct objtool_file *file, int type, struct instruction *insn)
{
	switch (type) {
	/* Handled in __annotate_early(). */
	case ANNOTYPE_NOENDBR:
		break;

	case ANNOTYPE_RETPOLINE_SAFE:
		if (insn->type != INSN_JUMP_DYNAMIC &&
		    insn->type != INSN_CALL_DYNAMIC &&
		    insn->type != INSN_RETURN &&
		    insn->type != INSN_NOP) {
			ERROR_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
			return -1;
		}

		insn->retpoline_safe = true;
		break;

	case ANNOTYPE_INSTR_BEGIN:
		insn->instr++;
		break;

	case ANNOTYPE_INSTR_END:
		insn->instr--;
		break;

	case ANNOTYPE_UNRET_BEGIN:
		insn->unret = 1;
		break;

	/* Handled in __annotate_early(). */
	case ANNOTYPE_IGNORE_ALTS:
		break;

	/* Handled in __annotate_ifc(). */
	case ANNOTYPE_INTRA_FUNCTION_CALL:
		break;

	case ANNOTYPE_REACHABLE:
		insn->dead_end = false;
		break;

	default:
		ERROR_INSN(insn, "Unknown annotation type: %d", type);
		return -1;
	}

	return 0;
}
/*
 * Return true if name matches an instrumentation function, where calls to that
 * function from noinstr code can safely be removed, but compilers won't do so.
 */
static bool is_profiling_func(const char *name)
{
	static const char kcov_prefix[] = "__sanitizer_cov_";
	static const char tsan_prefix[] = "__tsan_func_";

	/*
	 * Many compilers cannot disable KCOV with a function attribute.
	 */
	if (strncmp(name, kcov_prefix, sizeof(kcov_prefix) - 1) == 0)
		return true;

	/*
	 * Some compilers currently do not remove __tsan_func_entry/exit nor
	 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
	 * the __no_sanitize_thread attribute, remove them. Once the kernel's
	 * minimum Clang version is 14.0, this can be removed.
	 */
	if (strncmp(name, tsan_prefix, sizeof(tsan_prefix) - 1) == 0)
		return true;

	return strcmp(name, "__tsan_atomic_signal_fence") == 0;
}
/*
 * One pass over all symbols to set classification flags (local label, static
 * call trampoline, retpoline/return thunk, embedded insn, fentry, profiling)
 * that later decode/validation passes rely on.
 */
static int classify_symbols(struct objtool_file *file)
{
	struct symbol *func;

	for_each_sym(file, func) {
		if (func->type == STT_NOTYPE && strstarts(func->name, ".L"))
			func->local_label = true;

		/* The remaining classifications apply to global symbols only. */
		if (func->bind != STB_GLOBAL)
			continue;

		if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
			     strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
			func->static_call_tramp = true;

		if (arch_is_retpoline(func))
			func->retpoline_thunk = true;

		if (arch_is_rethunk(func))
			func->return_thunk = true;

		if (arch_is_embedded_insn(func))
			func->embedded_insn = true;

		if (arch_ftrace_match(func->name))
			func->fentry = true;

		if (is_profiling_func(func->name))
			func->profiling_func = true;
	}

	return 0;
}
/*
 * Flag the sections that can contain jump tables, and record in file->rodata
 * whether any were found (so jump-table search can be skipped entirely).
 */
static void mark_rodata(struct objtool_file *file)
{
	struct section *sec;
	bool found = false;

	/*
	 * Search for the following rodata sections, each of which can
	 * potentially contain jump tables:
	 *
	 * - .rodata: can contain GCC switch tables
	 * - .rodata.<func>: same, if -fdata-sections is being used
	 * - .data.rel.ro.c_jump_table: contains C annotated jump tables
	 *
	 * .rodata.str1.* sections are ignored; they don't contain jump tables.
	 */
	for_each_sec(file, sec) {
		if ((!strncmp(sec->name, ".rodata", 7) &&
		     !strstr(sec->name, ".str1.")) ||
		    !strncmp(sec->name, ".data.rel.ro", 12)) {
			sec->rodata = true;
			found = true;
		}
	}

	file->rodata = found;
}
/*
 * Run all the decode passes in dependency order.  The ordering constraints
 * are documented inline; do not reorder without checking them.
 */
static int decode_sections(struct objtool_file *file)
{
	int ret;

	mark_rodata(file);

	ret = init_pv_ops(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_{jump_call}_destination.
	 */
	ret = classify_symbols(file);
	if (ret)
		return ret;

	ret = decode_instructions(file);
	if (ret)
		return ret;

	ret = add_ignores(file);
	if (ret)
		return ret;

	add_uaccess_safe(file);

	ret = read_annotate(file, __annotate_early);
	if (ret)
		return ret;

	/*
	 * Must be before add_jump_destinations(), which depends on 'func'
	 * being set for alternatives, to enable proper sibling call detection.
	 */
	if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
		ret = add_special_section_alts(file);
		if (ret)
			return ret;
	}

	ret = add_jump_destinations(file);
	if (ret)
		return ret;

	/*
	 * Must be before add_call_destination(); it changes INSN_CALL to
	 * INSN_JUMP.
	 */
	ret = read_annotate(file, __annotate_ifc);
	if (ret)
		return ret;

	ret = add_call_destinations(file);
	if (ret)
		return ret;

	ret = add_jump_table_alts(file);
	if (ret)
		return ret;

	ret = read_unwind_hints(file);
	if (ret)
		return ret;

	/*
	 * Must be after add_call_destinations() such that it can override
	 * dead_end_function() marks.
	 */
	ret = read_annotate(file, __annotate_late);
	if (ret)
		return ret;

	return 0;
}
/*
 * Is @insn a call to a "special" destination (fentry or an embedded
 * instruction) that validation treats differently from regular calls?
 */
static bool is_special_call(struct instruction *insn)
{
	if (insn->type == INSN_CALL) {
		struct symbol *dest = insn_call_dest(insn);

		/* Unresolved destination: cannot be special. */
		if (!dest)
			return false;

		if (dest->fentry || dest->embedded_insn)
			return true;
	}

	return false;
}
/*
 * Return true if the current CFI state differs from the initial
 * function-entry state in any way: CFA base/offset, DRAP, stack size, or any
 * callee-saved register location.
 */
static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;
	int i;

	if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
		return true;

	if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
		return true;

	if (cfi->stack_size != initial_func_cfi.cfa.offset)
		return true;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
		    cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
			return true;
	}

	return false;
}
2642 static bool check_reg_frame_pos(const struct cfi_reg
*reg
,
2643 int expected_offset
)
2645 return reg
->base
== CFI_CFA
&&
2646 reg
->offset
== expected_offset
;
/*
 * Return true if the CFI state describes a valid frame-pointer-based frame:
 * CFA is BP-relative with BP and the return address saved at the canonical
 * frame slots, or a DRAP frame where BP has been preserved in itself.
 */
static bool has_valid_stack_frame(struct insn_state *state)
{
	struct cfi_state *cfi = &state->cfi;

	if (cfi->cfa.base == CFI_BP &&
	    check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
	    check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
		return true;

	if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
		return true;

	return false;
}
/*
 * Simplified CFI tracking used for UNWIND_HINT_TYPE_REGS[_PARTIAL] regions:
 * only the CFA offset is tracked, through pushes, pops and SP adjustments.
 */
static int update_cfi_state_regs(struct instruction *insn,
				 struct cfi_state *cfi,
				 struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;

	/* Nothing to track unless the CFA is SP-based. */
	if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
		return 0;

	/* push */
	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
		cfa->offset += 8;

	/* pop */
	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
		cfa->offset -= 8;

	/* add immediate to sp */
	if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
	    op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
		cfa->offset -= op->src.offset;

	return 0;
}
/*
 * Record where callee-saved register @reg was stored (@base + @offset), but
 * only the first time -- an already-tracked register is left alone.
 */
static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
	if (arch_callee_saved_reg(reg) &&
	    cfi->regs[reg].base == CFI_UNDEFINED) {
		cfi->regs[reg].base = base;
		cfi->regs[reg].offset = offset;
	}
}
2698 static void restore_reg(struct cfi_state
*cfi
, unsigned char reg
)
2700 cfi
->regs
[reg
].base
= initial_func_cfi
.regs
[reg
].base
;
2701 cfi
->regs
[reg
].offset
= initial_func_cfi
.regs
[reg
].offset
;
2705 * A note about DRAP stack alignment:
2707 * GCC has the concept of a DRAP register, which is used to help keep track of
2708 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2709 * register. The typical DRAP pattern is:
2711 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2712 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2713 * 41 ff 72 f8 pushq -0x8(%r10)
2715 * 48 89 e5 mov %rsp,%rbp
2722 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2725 * There are some variations in the epilogues, like:
2733 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2738 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2739 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2740 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2741 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2743 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2746 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2747 * restored beforehand:
2750 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2751 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2753 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2757 static int update_cfi_state(struct instruction
*insn
,
2758 struct instruction
*next_insn
,
2759 struct cfi_state
*cfi
, struct stack_op
*op
)
2761 struct cfi_reg
*cfa
= &cfi
->cfa
;
2762 struct cfi_reg
*regs
= cfi
->regs
;
2764 /* ignore UNWIND_HINT_UNDEFINED regions */
2765 if (cfi
->force_undefined
)
2768 /* stack operations don't make sense with an undefined CFA */
2769 if (cfa
->base
== CFI_UNDEFINED
) {
2770 if (insn_func(insn
)) {
2771 WARN_INSN(insn
, "undefined stack state");
2777 if (cfi
->type
== UNWIND_HINT_TYPE_REGS
||
2778 cfi
->type
== UNWIND_HINT_TYPE_REGS_PARTIAL
)
2779 return update_cfi_state_regs(insn
, cfi
, op
);
2781 switch (op
->dest
.type
) {
2784 switch (op
->src
.type
) {
2787 if (op
->src
.reg
== CFI_SP
&& op
->dest
.reg
== CFI_BP
&&
2788 cfa
->base
== CFI_SP
&&
2789 check_reg_frame_pos(®s
[CFI_BP
], -cfa
->offset
)) {
2791 /* mov %rsp, %rbp */
2792 cfa
->base
= op
->dest
.reg
;
2793 cfi
->bp_scratch
= false;
2796 else if (op
->src
.reg
== CFI_SP
&&
2797 op
->dest
.reg
== CFI_BP
&& cfi
->drap
) {
2799 /* drap: mov %rsp, %rbp */
2800 regs
[CFI_BP
].base
= CFI_BP
;
2801 regs
[CFI_BP
].offset
= -cfi
->stack_size
;
2802 cfi
->bp_scratch
= false;
2805 else if (op
->src
.reg
== CFI_SP
&& cfa
->base
== CFI_SP
) {
2810 * This is needed for the rare case where GCC
2817 cfi
->vals
[op
->dest
.reg
].base
= CFI_CFA
;
2818 cfi
->vals
[op
->dest
.reg
].offset
= -cfi
->stack_size
;
2821 else if (op
->src
.reg
== CFI_BP
&& op
->dest
.reg
== CFI_SP
&&
2822 (cfa
->base
== CFI_BP
|| cfa
->base
== cfi
->drap_reg
)) {
2827 * Restore the original stack pointer (Clang).
2829 cfi
->stack_size
= -cfi
->regs
[CFI_BP
].offset
;
2832 else if (op
->dest
.reg
== cfa
->base
) {
2834 /* mov %reg, %rsp */
2835 if (cfa
->base
== CFI_SP
&&
2836 cfi
->vals
[op
->src
.reg
].base
== CFI_CFA
) {
2839 * This is needed for the rare case
2840 * where GCC does something dumb like:
2842 * lea 0x8(%rsp), %rcx
2846 cfa
->offset
= -cfi
->vals
[op
->src
.reg
].offset
;
2847 cfi
->stack_size
= cfa
->offset
;
2849 } else if (cfa
->base
== CFI_SP
&&
2850 cfi
->vals
[op
->src
.reg
].base
== CFI_SP_INDIRECT
&&
2851 cfi
->vals
[op
->src
.reg
].offset
== cfa
->offset
) {
2856 * 1: mov %rsp, (%[tos])
2857 * 2: mov %[tos], %rsp
2863 * 1 - places a pointer to the previous
2864 * stack at the Top-of-Stack of the
2867 * 2 - switches to the new stack.
2869 * 3 - pops the Top-of-Stack to restore
2870 * the original stack.
2872 * Note: we set base to SP_INDIRECT
2873 * here and preserve offset. Therefore
2874 * when the unwinder reaches ToS it
2875 * will dereference SP and then add the
2876 * offset to find the next frame, IOW:
2879 cfa
->base
= CFI_SP_INDIRECT
;
2882 cfa
->base
= CFI_UNDEFINED
;
2887 else if (op
->dest
.reg
== CFI_SP
&&
2888 cfi
->vals
[op
->src
.reg
].base
== CFI_SP_INDIRECT
&&
2889 cfi
->vals
[op
->src
.reg
].offset
== cfa
->offset
) {
2892 * The same stack swizzle case 2) as above. But
2893 * because we can't change cfa->base, case 3)
2894 * will become a regular POP. Pretend we're a
2895 * PUSH so things don't go unbalanced.
2897 cfi
->stack_size
+= 8;
2904 if (op
->dest
.reg
== CFI_SP
&& op
->src
.reg
== CFI_SP
) {
2907 cfi
->stack_size
-= op
->src
.offset
;
2908 if (cfa
->base
== CFI_SP
)
2909 cfa
->offset
-= op
->src
.offset
;
2913 if (op
->dest
.reg
== CFI_BP
&& op
->src
.reg
== CFI_SP
&&
2914 insn
->sym
->frame_pointer
) {
2915 /* addi.d fp,sp,imm on LoongArch */
2916 if (cfa
->base
== CFI_SP
&& cfa
->offset
== op
->src
.offset
) {
2923 if (op
->dest
.reg
== CFI_SP
&& op
->src
.reg
== CFI_BP
) {
2924 /* addi.d sp,fp,imm on LoongArch */
2925 if (cfa
->base
== CFI_BP
&& cfa
->offset
== 0) {
2926 if (insn
->sym
->frame_pointer
) {
2928 cfa
->offset
= -op
->src
.offset
;
2931 /* lea disp(%rbp), %rsp */
2932 cfi
->stack_size
= -(op
->src
.offset
+ regs
[CFI_BP
].offset
);
2937 if (op
->src
.reg
== CFI_SP
&& cfa
->base
== CFI_SP
) {
2939 /* drap: lea disp(%rsp), %drap */
2940 cfi
->drap_reg
= op
->dest
.reg
;
2943 * lea disp(%rsp), %reg
2945 * This is needed for the rare case where GCC
2946 * does something dumb like:
2948 * lea 0x8(%rsp), %rcx
2952 cfi
->vals
[op
->dest
.reg
].base
= CFI_CFA
;
2953 cfi
->vals
[op
->dest
.reg
].offset
= \
2954 -cfi
->stack_size
+ op
->src
.offset
;
2959 if (cfi
->drap
&& op
->dest
.reg
== CFI_SP
&&
2960 op
->src
.reg
== cfi
->drap_reg
) {
2962 /* drap: lea disp(%drap), %rsp */
2964 cfa
->offset
= cfi
->stack_size
= -op
->src
.offset
;
2965 cfi
->drap_reg
= CFI_UNDEFINED
;
2970 if (op
->dest
.reg
== cfi
->cfa
.base
&& !(next_insn
&& next_insn
->hint
)) {
2971 WARN_INSN(insn
, "unsupported stack register modification");
2978 if (op
->dest
.reg
!= CFI_SP
||
2979 (cfi
->drap_reg
!= CFI_UNDEFINED
&& cfa
->base
!= CFI_SP
) ||
2980 (cfi
->drap_reg
== CFI_UNDEFINED
&& cfa
->base
!= CFI_BP
)) {
2981 WARN_INSN(insn
, "unsupported stack pointer realignment");
2985 if (cfi
->drap_reg
!= CFI_UNDEFINED
) {
2986 /* drap: and imm, %rsp */
2987 cfa
->base
= cfi
->drap_reg
;
2988 cfa
->offset
= cfi
->stack_size
= 0;
2993 * Older versions of GCC (4.8ish) realign the stack
2994 * without DRAP, with a frame pointer.
3001 if (op
->dest
.reg
== CFI_SP
&& cfa
->base
== CFI_SP_INDIRECT
) {
3003 /* pop %rsp; # restore from a stack swizzle */
3008 if (!cfi
->drap
&& op
->dest
.reg
== cfa
->base
) {
3014 if (cfi
->drap
&& cfa
->base
== CFI_BP_INDIRECT
&&
3015 op
->dest
.reg
== cfi
->drap_reg
&&
3016 cfi
->drap_offset
== -cfi
->stack_size
) {
3018 /* drap: pop %drap */
3019 cfa
->base
= cfi
->drap_reg
;
3021 cfi
->drap_offset
= -1;
3023 } else if (cfi
->stack_size
== -regs
[op
->dest
.reg
].offset
) {
3026 restore_reg(cfi
, op
->dest
.reg
);
3029 cfi
->stack_size
-= 8;
3030 if (cfa
->base
== CFI_SP
)
3035 case OP_SRC_REG_INDIRECT
:
3036 if (!cfi
->drap
&& op
->dest
.reg
== cfa
->base
&&
3037 op
->dest
.reg
== CFI_BP
) {
3039 /* mov disp(%rsp), %rbp */
3041 cfa
->offset
= cfi
->stack_size
;
3044 if (cfi
->drap
&& op
->src
.reg
== CFI_BP
&&
3045 op
->src
.offset
== cfi
->drap_offset
) {
3047 /* drap: mov disp(%rbp), %drap */
3048 cfa
->base
= cfi
->drap_reg
;
3050 cfi
->drap_offset
= -1;
3053 if (cfi
->drap
&& op
->src
.reg
== CFI_BP
&&
3054 op
->src
.offset
== regs
[op
->dest
.reg
].offset
) {
3056 /* drap: mov disp(%rbp), %reg */
3057 restore_reg(cfi
, op
->dest
.reg
);
3059 } else if (op
->src
.reg
== cfa
->base
&&
3060 op
->src
.offset
== regs
[op
->dest
.reg
].offset
+ cfa
->offset
) {
3062 /* mov disp(%rbp), %reg */
3063 /* mov disp(%rsp), %reg */
3064 restore_reg(cfi
, op
->dest
.reg
);
3066 } else if (op
->src
.reg
== CFI_SP
&&
3067 op
->src
.offset
== regs
[op
->dest
.reg
].offset
+ cfi
->stack_size
) {
3069 /* mov disp(%rsp), %reg */
3070 restore_reg(cfi
, op
->dest
.reg
);
3076 WARN_INSN(insn
, "unknown stack-related instruction");
3084 cfi
->stack_size
+= 8;
3085 if (cfa
->base
== CFI_SP
)
3088 if (op
->src
.type
!= OP_SRC_REG
)
3092 if (op
->src
.reg
== cfa
->base
&& op
->src
.reg
== cfi
->drap_reg
) {
3094 /* drap: push %drap */
3095 cfa
->base
= CFI_BP_INDIRECT
;
3096 cfa
->offset
= -cfi
->stack_size
;
3098 /* save drap so we know when to restore it */
3099 cfi
->drap_offset
= -cfi
->stack_size
;
3101 } else if (op
->src
.reg
== CFI_BP
&& cfa
->base
== cfi
->drap_reg
) {
3103 /* drap: push %rbp */
3104 cfi
->stack_size
= 0;
3108 /* drap: push %reg */
3109 save_reg(cfi
, op
->src
.reg
, CFI_BP
, -cfi
->stack_size
);
3115 save_reg(cfi
, op
->src
.reg
, CFI_CFA
, -cfi
->stack_size
);
3118 /* detect when asm code uses rbp as a scratch register */
3119 if (opts
.stackval
&& insn_func(insn
) && op
->src
.reg
== CFI_BP
&&
3120 cfa
->base
!= CFI_BP
)
3121 cfi
->bp_scratch
= true;
3124 case OP_DEST_REG_INDIRECT
:
3127 if (op
->src
.reg
== cfa
->base
&& op
->src
.reg
== cfi
->drap_reg
) {
3129 /* drap: mov %drap, disp(%rbp) */
3130 cfa
->base
= CFI_BP_INDIRECT
;
3131 cfa
->offset
= op
->dest
.offset
;
3133 /* save drap offset so we know when to restore it */
3134 cfi
->drap_offset
= op
->dest
.offset
;
3137 /* drap: mov reg, disp(%rbp) */
3138 save_reg(cfi
, op
->src
.reg
, CFI_BP
, op
->dest
.offset
);
3141 } else if (op
->dest
.reg
== cfa
->base
) {
3143 /* mov reg, disp(%rbp) */
3144 /* mov reg, disp(%rsp) */
3145 save_reg(cfi
, op
->src
.reg
, CFI_CFA
,
3146 op
->dest
.offset
- cfi
->cfa
.offset
);
3148 } else if (op
->dest
.reg
== CFI_SP
) {
3150 /* mov reg, disp(%rsp) */
3151 save_reg(cfi
, op
->src
.reg
, CFI_CFA
,
3152 op
->dest
.offset
- cfi
->stack_size
);
3154 } else if (op
->src
.reg
== CFI_SP
&& op
->dest
.offset
== 0) {
3156 /* mov %rsp, (%reg); # setup a stack swizzle. */
3157 cfi
->vals
[op
->dest
.reg
].base
= CFI_SP_INDIRECT
;
3158 cfi
->vals
[op
->dest
.reg
].offset
= cfa
->offset
;
3164 if (op
->src
.type
!= OP_SRC_POP
&& op
->src
.type
!= OP_SRC_POPF
) {
3165 WARN_INSN(insn
, "unknown stack-related memory operation");
3170 cfi
->stack_size
-= 8;
3171 if (cfa
->base
== CFI_SP
)
3177 WARN_INSN(insn
, "unknown stack-related instruction");
3185 * The stack layouts of alternatives instructions can sometimes diverge when
3186 * they have stack modifications. That's fine as long as the potential stack
3187 * layouts don't conflict at any given potential instruction boundary.
3189 * Flatten the CFIs of the different alternative code streams (both original
3190 * and replacement) into a single shared CFI array which can be used to detect
3191 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3193 static int propagate_alt_cfi(struct objtool_file
*file
, struct instruction
*insn
)
3195 struct cfi_state
**alt_cfi
;
3198 if (!insn
->alt_group
)
3202 WARN("CFI missing");
3206 alt_cfi
= insn
->alt_group
->cfi
;
3207 group_off
= insn
->offset
- insn
->alt_group
->first_insn
->offset
;
3209 if (!alt_cfi
[group_off
]) {
3210 alt_cfi
[group_off
] = insn
->cfi
;
3212 if (cficmp(alt_cfi
[group_off
], insn
->cfi
)) {
3213 struct alt_group
*orig_group
= insn
->alt_group
->orig_group
?: insn
->alt_group
;
3214 struct instruction
*orig
= orig_group
->first_insn
;
3215 WARN_INSN(orig
, "stack layout conflict in alternatives: %s",
3216 offstr(insn
->sec
, insn
->offset
));
3224 static int handle_insn_ops(struct instruction
*insn
,
3225 struct instruction
*next_insn
,
3226 struct insn_state
*state
)
3228 struct stack_op
*op
;
3231 for (op
= insn
->stack_ops
; op
; op
= op
->next
) {
3233 ret
= update_cfi_state(insn
, next_insn
, &state
->cfi
, op
);
3237 if (!opts
.uaccess
|| !insn
->alt_group
)
3240 if (op
->dest
.type
== OP_DEST_PUSHF
) {
3241 if (!state
->uaccess_stack
) {
3242 state
->uaccess_stack
= 1;
3243 } else if (state
->uaccess_stack
>> 31) {
3244 WARN_INSN(insn
, "PUSHF stack exhausted");
3247 state
->uaccess_stack
<<= 1;
3248 state
->uaccess_stack
|= state
->uaccess
;
3251 if (op
->src
.type
== OP_SRC_POPF
) {
3252 if (state
->uaccess_stack
) {
3253 state
->uaccess
= state
->uaccess_stack
& 1;
3254 state
->uaccess_stack
>>= 1;
3255 if (state
->uaccess_stack
== 1)
3256 state
->uaccess_stack
= 0;
3264 static bool insn_cfi_match(struct instruction
*insn
, struct cfi_state
*cfi2
)
3266 struct cfi_state
*cfi1
= insn
->cfi
;
3270 WARN("CFI missing");
3274 if (memcmp(&cfi1
->cfa
, &cfi2
->cfa
, sizeof(cfi1
->cfa
))) {
3276 WARN_INSN(insn
, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3277 cfi1
->cfa
.base
, cfi1
->cfa
.offset
,
3278 cfi2
->cfa
.base
, cfi2
->cfa
.offset
);
3283 if (memcmp(&cfi1
->regs
, &cfi2
->regs
, sizeof(cfi1
->regs
))) {
3284 for (i
= 0; i
< CFI_NUM_REGS
; i
++) {
3286 if (!memcmp(&cfi1
->regs
[i
], &cfi2
->regs
[i
], sizeof(struct cfi_reg
)))
3289 WARN_INSN(insn
, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3290 i
, cfi1
->regs
[i
].base
, cfi1
->regs
[i
].offset
,
3291 i
, cfi2
->regs
[i
].base
, cfi2
->regs
[i
].offset
);
3296 if (cfi1
->type
!= cfi2
->type
) {
3298 WARN_INSN(insn
, "stack state mismatch: type1=%d type2=%d",
3299 cfi1
->type
, cfi2
->type
);
3303 if (cfi1
->drap
!= cfi2
->drap
||
3304 (cfi1
->drap
&& cfi1
->drap_reg
!= cfi2
->drap_reg
) ||
3305 (cfi1
->drap
&& cfi1
->drap_offset
!= cfi2
->drap_offset
)) {
3307 WARN_INSN(insn
, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3308 cfi1
->drap
, cfi1
->drap_reg
, cfi1
->drap_offset
,
3309 cfi2
->drap
, cfi2
->drap_reg
, cfi2
->drap_offset
);
3316 static inline bool func_uaccess_safe(struct symbol
*func
)
3319 return func
->uaccess_safe
;
3324 static inline const char *call_dest_name(struct instruction
*insn
)
3326 static char pvname
[19];
3327 struct reloc
*reloc
;
3330 if (insn_call_dest(insn
))
3331 return insn_call_dest(insn
)->name
;
3333 reloc
= insn_reloc(NULL
, insn
);
3334 if (reloc
&& !strcmp(reloc
->sym
->name
, "pv_ops")) {
3335 idx
= (reloc_addend(reloc
) / sizeof(void *));
3336 snprintf(pvname
, sizeof(pvname
), "pv_ops[%d]", idx
);
3343 static bool pv_call_dest(struct objtool_file
*file
, struct instruction
*insn
)
3345 struct symbol
*target
;
3346 struct reloc
*reloc
;
3349 reloc
= insn_reloc(file
, insn
);
3350 if (!reloc
|| strcmp(reloc
->sym
->name
, "pv_ops"))
3353 idx
= (arch_dest_reloc_offset(reloc_addend(reloc
)) / sizeof(void *));
3355 if (file
->pv_ops
[idx
].clean
)
3358 file
->pv_ops
[idx
].clean
= true;
3360 list_for_each_entry(target
, &file
->pv_ops
[idx
].targets
, pv_target
) {
3361 if (!target
->sec
->noinstr
) {
3362 WARN("pv_ops[%d]: %s", idx
, target
->name
);
3363 file
->pv_ops
[idx
].clean
= false;
3367 return file
->pv_ops
[idx
].clean
;
3370 static inline bool noinstr_call_dest(struct objtool_file
*file
,
3371 struct instruction
*insn
,
3372 struct symbol
*func
)
3375 * We can't deal with indirect function calls at present;
3376 * assume they're instrumented.
3380 return pv_call_dest(file
, insn
);
3386 * If the symbol is from a noinstr section; we good.
3388 if (func
->sec
->noinstr
)
3392 * If the symbol is a static_call trampoline, we can't tell.
3394 if (func
->static_call_tramp
)
3398 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3399 * something 'BAD' happened. At the risk of taking the machine down,
3400 * let them proceed to get the message out.
3402 if (!strncmp(func
->name
, "__ubsan_handle_", 15))
3408 static int validate_call(struct objtool_file
*file
,
3409 struct instruction
*insn
,
3410 struct insn_state
*state
)
3412 if (state
->noinstr
&& state
->instr
<= 0 &&
3413 !noinstr_call_dest(file
, insn
, insn_call_dest(insn
))) {
3414 WARN_INSN(insn
, "call to %s() leaves .noinstr.text section", call_dest_name(insn
));
3418 if (state
->uaccess
&& !func_uaccess_safe(insn_call_dest(insn
))) {
3419 WARN_INSN(insn
, "call to %s() with UACCESS enabled", call_dest_name(insn
));
3424 WARN_INSN(insn
, "call to %s() with DF set", call_dest_name(insn
));
/*
 * A sibling (tail) call must not leave behind a modified stack frame; beyond
 * that it is validated like a normal call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
		return 1;
	}

	return validate_call(file, insn, state);
}
3443 static int validate_return(struct symbol
*func
, struct instruction
*insn
, struct insn_state
*state
)
3445 if (state
->noinstr
&& state
->instr
> 0) {
3446 WARN_INSN(insn
, "return with instrumentation enabled");
3450 if (state
->uaccess
&& !func_uaccess_safe(func
)) {
3451 WARN_INSN(insn
, "return with UACCESS enabled");
3455 if (!state
->uaccess
&& func_uaccess_safe(func
)) {
3456 WARN_INSN(insn
, "return with UACCESS disabled from a UACCESS-safe function");
3461 WARN_INSN(insn
, "return with DF set");
3465 if (func
&& has_modified_stack_frame(insn
, state
)) {
3466 WARN_INSN(insn
, "return with modified stack frame");
3470 if (state
->cfi
.bp_scratch
) {
3471 WARN_INSN(insn
, "BP used as a scratch register");
3478 static struct instruction
*next_insn_to_validate(struct objtool_file
*file
,
3479 struct instruction
*insn
)
3481 struct alt_group
*alt_group
= insn
->alt_group
;
3484 * Simulate the fact that alternatives are patched in-place. When the
3485 * end of a replacement alt_group is reached, redirect objtool flow to
3486 * the end of the original alt_group.
3488 * insn->alts->insn -> alt_group->first_insn
3490 * alt_group->last_insn
3491 * [alt_group->nop] -> next(orig_group->last_insn)
3494 if (alt_group
->nop
) {
3495 /* ->nop implies ->orig_group */
3496 if (insn
== alt_group
->last_insn
)
3497 return alt_group
->nop
;
3498 if (insn
== alt_group
->nop
)
3501 if (insn
== alt_group
->last_insn
&& alt_group
->orig_group
)
3505 return next_insn_same_sec(file
, insn
);
3508 return next_insn_same_sec(file
, alt_group
->orig_group
->last_insn
);
3511 static bool skip_alt_group(struct instruction
*insn
)
3513 struct instruction
*alt_insn
= insn
->alts
? insn
->alts
->insn
: NULL
;
3515 /* ANNOTATE_IGNORE_ALTERNATIVE */
3516 if (insn
->alt_group
&& insn
->alt_group
->ignore
)
3520 * For NOP patched with CLAC/STAC, only follow the latter to avoid
3521 * impossible code paths combining patched CLAC with unpatched STAC
3524 * ANNOTATE_IGNORE_ALTERNATIVE could have been used here, but Linus
3525 * requested not to do that to avoid hurting .s file readability
3526 * around CLAC/STAC alternative sites.
3532 /* Don't override ASM_{CLAC,STAC}_UNSAFE */
3533 if (alt_insn
->alt_group
&& alt_insn
->alt_group
->ignore
)
3536 return alt_insn
->type
== INSN_CLAC
|| alt_insn
->type
== INSN_STAC
;
3540 * Follow the branch starting at the given instruction, and recursively follow
3541 * any other branches (jumps). Meanwhile, track the frame pointer state at
3542 * each instruction and validate all the rules described in
3543 * tools/objtool/Documentation/objtool.txt.
3545 static int validate_branch(struct objtool_file
*file
, struct symbol
*func
,
3546 struct instruction
*insn
, struct insn_state state
)
3548 struct alternative
*alt
;
3549 struct instruction
*next_insn
, *prev_insn
= NULL
;
3550 struct section
*sec
;
3554 if (func
&& func
->ignore
)
3560 next_insn
= next_insn_to_validate(file
, insn
);
3562 if (func
&& insn_func(insn
) && func
!= insn_func(insn
)->pfunc
) {
3563 /* Ignore KCFI type preambles, which always fall through */
3564 if (!strncmp(func
->name
, "__cfi_", 6) ||
3565 !strncmp(func
->name
, "__pfx_", 6))
3568 if (file
->ignore_unreachables
)
3571 WARN("%s() falls through to next function %s()",
3572 func
->name
, insn_func(insn
)->name
);
3578 visited
= VISITED_BRANCH
<< state
.uaccess
;
3579 if (insn
->visited
& VISITED_BRANCH_MASK
) {
3580 if (!insn
->hint
&& !insn_cfi_match(insn
, &state
.cfi
))
3583 if (insn
->visited
& visited
)
3590 state
.instr
+= insn
->instr
;
3593 if (insn
->restore
) {
3594 struct instruction
*save_insn
, *i
;
3599 sym_for_each_insn_continue_reverse(file
, func
, i
) {
3607 WARN_INSN(insn
, "no corresponding CFI save for CFI restore");
3611 if (!save_insn
->visited
) {
3613 * If the restore hint insn is at the
3614 * beginning of a basic block and was
3615 * branched to from elsewhere, and the
3616 * save insn hasn't been visited yet,
3617 * defer following this branch for now.
3618 * It will be seen later via the
3619 * straight-line path.
3624 WARN_INSN(insn
, "objtool isn't smart enough to handle this CFI save/restore combo");
3628 insn
->cfi
= save_insn
->cfi
;
3632 state
.cfi
= *insn
->cfi
;
3634 /* XXX track if we actually changed state.cfi */
3636 if (prev_insn
&& !cficmp(prev_insn
->cfi
, &state
.cfi
)) {
3637 insn
->cfi
= prev_insn
->cfi
;
3640 insn
->cfi
= cfi_hash_find_or_add(&state
.cfi
);
3644 insn
->visited
|= visited
;
3646 if (propagate_alt_cfi(file
, insn
))
3650 for (alt
= insn
->alts
; alt
; alt
= alt
->next
) {
3651 ret
= validate_branch(file
, func
, alt
->insn
, state
);
3653 BT_INSN(insn
, "(alt)");
3659 if (skip_alt_group(insn
))
3662 if (handle_insn_ops(insn
, next_insn
, &state
))
3665 switch (insn
->type
) {
3668 return validate_return(func
, insn
, &state
);
3671 case INSN_CALL_DYNAMIC
:
3672 ret
= validate_call(file
, insn
, &state
);
3676 if (opts
.stackval
&& func
&& !is_special_call(insn
) &&
3677 !has_valid_stack_frame(&state
)) {
3678 WARN_INSN(insn
, "call without frame pointer save/setup");
3684 case INSN_JUMP_CONDITIONAL
:
3685 case INSN_JUMP_UNCONDITIONAL
:
3686 if (is_sibling_call(insn
)) {
3687 ret
= validate_sibling_call(file
, insn
, &state
);
3691 } else if (insn
->jump_dest
) {
3692 ret
= validate_branch(file
, func
,
3693 insn
->jump_dest
, state
);
3695 BT_INSN(insn
, "(branch)");
3700 if (insn
->type
== INSN_JUMP_UNCONDITIONAL
)
3705 case INSN_JUMP_DYNAMIC
:
3706 case INSN_JUMP_DYNAMIC_CONDITIONAL
:
3707 if (is_sibling_call(insn
)) {
3708 ret
= validate_sibling_call(file
, insn
, &state
);
3713 if (insn
->type
== INSN_JUMP_DYNAMIC
)
3719 if (func
&& (!next_insn
|| !next_insn
->hint
)) {
3720 WARN_INSN(insn
, "unsupported instruction in callable function");
3727 if (func
&& (!next_insn
|| !next_insn
->hint
)) {
3728 WARN_INSN(insn
, "unsupported instruction in callable function");
3738 if (state
.uaccess
) {
3739 WARN_INSN(insn
, "recursive UACCESS enable");
3743 state
.uaccess
= true;
3750 if (!state
.uaccess
&& func
) {
3751 WARN_INSN(insn
, "redundant UACCESS disable");
3755 if (func_uaccess_safe(func
) && !state
.uaccess_stack
) {
3756 WARN_INSN(insn
, "UACCESS-safe disables UACCESS");
3760 state
.uaccess
= false;
3765 WARN_INSN(insn
, "recursive STD");
3773 if (!state
.df
&& func
) {
3774 WARN_INSN(insn
, "redundant CLD");
3789 if (state
.cfi
.cfa
.base
== CFI_UNDEFINED
)
3791 if (file
->ignore_unreachables
)
3794 WARN("%s%sunexpected end of section %s",
3795 func
? func
->name
: "", func
? "(): " : "",
3807 static int validate_unwind_hint(struct objtool_file
*file
,
3808 struct instruction
*insn
,
3809 struct insn_state
*state
)
3811 if (insn
->hint
&& !insn
->visited
) {
3812 int ret
= validate_branch(file
, insn_func(insn
), insn
, *state
);
3814 BT_INSN(insn
, "<=== (hint)");
3821 static int validate_unwind_hints(struct objtool_file
*file
, struct section
*sec
)
3823 struct instruction
*insn
;
3824 struct insn_state state
;
3830 init_insn_state(file
, &state
, sec
);
3833 sec_for_each_insn(file
, sec
, insn
)
3834 warnings
+= validate_unwind_hint(file
, insn
, &state
);
3836 for_each_insn(file
, insn
)
3837 warnings
+= validate_unwind_hint(file
, insn
, &state
);
3844 * Validate rethunk entry constraint: must untrain RET before the first RET.
3846 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
3847 * before an actual RET instruction.
3849 static int validate_unret(struct objtool_file
*file
, struct instruction
*insn
)
3851 struct instruction
*next
, *dest
;
3855 next
= next_insn_to_validate(file
, insn
);
3857 if (insn
->visited
& VISITED_UNRET
)
3860 insn
->visited
|= VISITED_UNRET
;
3863 struct alternative
*alt
;
3864 for (alt
= insn
->alts
; alt
; alt
= alt
->next
) {
3865 ret
= validate_unret(file
, alt
->insn
);
3867 BT_INSN(insn
, "(alt)");
3873 switch (insn
->type
) {
3875 case INSN_CALL_DYNAMIC
:
3876 case INSN_JUMP_DYNAMIC
:
3877 case INSN_JUMP_DYNAMIC_CONDITIONAL
:
3878 WARN_INSN(insn
, "early indirect call");
3881 case INSN_JUMP_UNCONDITIONAL
:
3882 case INSN_JUMP_CONDITIONAL
:
3883 if (!is_sibling_call(insn
)) {
3884 if (!insn
->jump_dest
) {
3885 WARN_INSN(insn
, "unresolved jump target after linking?!?");
3888 ret
= validate_unret(file
, insn
->jump_dest
);
3890 BT_INSN(insn
, "(branch%s)",
3891 insn
->type
== INSN_JUMP_CONDITIONAL
? "-cond" : "");
3895 if (insn
->type
== INSN_JUMP_UNCONDITIONAL
)
3903 dest
= find_insn(file
, insn_call_dest(insn
)->sec
,
3904 insn_call_dest(insn
)->offset
);
3906 WARN("Unresolved function after linking!?: %s",
3907 insn_call_dest(insn
)->name
);
3911 ret
= validate_unret(file
, dest
);
3913 BT_INSN(insn
, "(call)");
3917 * If a call returns without error, it must have seen UNTRAIN_RET.
3918 * Therefore any non-error return is a success.
3923 WARN_INSN(insn
, "RET before UNTRAIN");
3933 if (insn
->retpoline_safe
)
3945 WARN_INSN(insn
, "teh end!");
3955 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
3956 * VALIDATE_UNRET_END before RET.
3958 static int validate_unrets(struct objtool_file
*file
)
3960 struct instruction
*insn
;
3963 for_each_insn(file
, insn
) {
3967 warnings
+= validate_unret(file
, insn
);
3973 static int validate_retpoline(struct objtool_file
*file
)
3975 struct instruction
*insn
;
3978 for_each_insn(file
, insn
) {
3979 if (insn
->type
!= INSN_JUMP_DYNAMIC
&&
3980 insn
->type
!= INSN_CALL_DYNAMIC
&&
3981 insn
->type
!= INSN_RETURN
)
3984 if (insn
->retpoline_safe
)
3987 if (insn
->sec
->init
)
3990 if (insn
->type
== INSN_RETURN
) {
3992 WARN_INSN(insn
, "'naked' return found in MITIGATION_RETHUNK build");
3998 WARN_INSN(insn
, "indirect %s found in MITIGATION_RETPOLINE build",
3999 insn
->type
== INSN_JUMP_DYNAMIC
? "jump" : "call");
4006 static bool is_kasan_insn(struct instruction
*insn
)
4008 return (insn
->type
== INSN_CALL
&&
4009 !strcmp(insn_call_dest(insn
)->name
, "__asan_handle_no_return"));
4012 static bool is_ubsan_insn(struct instruction
*insn
)
4014 return (insn
->type
== INSN_CALL
&&
4015 !strcmp(insn_call_dest(insn
)->name
,
4016 "__ubsan_handle_builtin_unreachable"));
4019 static bool ignore_unreachable_insn(struct objtool_file
*file
, struct instruction
*insn
)
4021 struct symbol
*func
= insn_func(insn
);
4022 struct instruction
*prev_insn
;
4025 if (insn
->type
== INSN_NOP
|| insn
->type
== INSN_TRAP
|| (func
&& func
->ignore
))
4029 * Ignore alternative replacement instructions. This can happen
4030 * when a whitelisted function uses one of the ALTERNATIVE macros.
4032 if (!strcmp(insn
->sec
->name
, ".altinstr_replacement") ||
4033 !strcmp(insn
->sec
->name
, ".altinstr_aux"))
4037 * Whole archive runs might encounter dead code from weak symbols.
4038 * This is where the linker will have dropped the weak symbol in
4039 * favour of a regular symbol, but leaves the code in place.
4041 * In this case we'll find a piece of code (whole function) that is not
4042 * covered by a !section symbol. Ignore them.
4044 if (opts
.link
&& !func
) {
4045 int size
= find_symbol_hole_containing(insn
->sec
, insn
->offset
);
4046 unsigned long end
= insn
->offset
+ size
;
4048 if (!size
) /* not a hole */
4051 if (size
< 0) /* hole until the end */
4054 sec_for_each_insn_continue(file
, insn
) {
4056 * If we reach a visited instruction at or before the
4057 * end of the hole, ignore the unreachable.
4062 if (insn
->offset
>= end
)
4066 * If this hole jumps to a .cold function, mark it ignore too.
4068 if (insn
->jump_dest
&& insn_func(insn
->jump_dest
) &&
4069 strstr(insn_func(insn
->jump_dest
)->name
, ".cold")) {
4070 insn_func(insn
->jump_dest
)->ignore
= true;
4080 if (func
->static_call_tramp
)
4084 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4085 * __builtin_unreachable(). The BUG() macro has an unreachable() after
4086 * the UD2, which causes GCC's undefined trap logic to emit another UD2
4087 * (or occasionally a JMP to UD2).
4089 * It may also insert a UD2 after calling a __noreturn function.
4091 prev_insn
= prev_insn_same_sec(file
, insn
);
4092 if (prev_insn
&& prev_insn
->dead_end
&&
4093 (insn
->type
== INSN_BUG
||
4094 (insn
->type
== INSN_JUMP_UNCONDITIONAL
&&
4095 insn
->jump_dest
&& insn
->jump_dest
->type
== INSN_BUG
)))
4099 * Check if this (or a subsequent) instruction is related to
4100 * CONFIG_UBSAN or CONFIG_KASAN.
4102 * End the search at 5 instructions to avoid going into the weeds.
4104 for (i
= 0; i
< 5; i
++) {
4106 if (is_kasan_insn(insn
) || is_ubsan_insn(insn
))
4109 if (insn
->type
== INSN_JUMP_UNCONDITIONAL
) {
4110 if (insn
->jump_dest
&&
4111 insn_func(insn
->jump_dest
) == func
) {
4112 insn
= insn
->jump_dest
;
4119 if (insn
->offset
+ insn
->len
>= func
->offset
+ func
->len
)
4122 insn
= next_insn_same_sec(file
, insn
);
4128 static int add_prefix_symbol(struct objtool_file
*file
, struct symbol
*func
)
4130 struct instruction
*insn
, *prev
;
4131 struct cfi_state
*cfi
;
4133 insn
= find_insn(file
, func
->sec
, func
->offset
);
4137 for (prev
= prev_insn_same_sec(file
, insn
);
4139 prev
= prev_insn_same_sec(file
, prev
)) {
4142 if (prev
->type
!= INSN_NOP
)
4145 offset
= func
->offset
- prev
->offset
;
4147 if (offset
> opts
.prefix
)
4150 if (offset
< opts
.prefix
)
4153 elf_create_prefix_symbol(file
->elf
, func
, opts
.prefix
);
4162 * This can happen if stack validation isn't enabled or the
4163 * function is annotated with STACK_FRAME_NON_STANDARD.
4168 /* Propagate insn->cfi to the prefix code */
4169 cfi
= cfi_hash_find_or_add(insn
->cfi
);
4170 for (; prev
!= insn
; prev
= next_insn_same_sec(file
, prev
))
4176 static int add_prefix_symbols(struct objtool_file
*file
)
4178 struct section
*sec
;
4179 struct symbol
*func
;
4181 for_each_sec(file
, sec
) {
4182 if (!(sec
->sh
.sh_flags
& SHF_EXECINSTR
))
4185 sec_for_each_sym(sec
, func
) {
4186 if (func
->type
!= STT_FUNC
)
4189 add_prefix_symbol(file
, func
);
4196 static int validate_symbol(struct objtool_file
*file
, struct section
*sec
,
4197 struct symbol
*sym
, struct insn_state
*state
)
4199 struct instruction
*insn
;
4203 WARN("%s() is missing an ELF size annotation", sym
->name
);
4207 if (sym
->pfunc
!= sym
|| sym
->alias
!= sym
)
4210 insn
= find_insn(file
, sec
, sym
->offset
);
4211 if (!insn
|| insn
->visited
)
4215 state
->uaccess
= sym
->uaccess_safe
;
4217 ret
= validate_branch(file
, insn_func(insn
), insn
, *state
);
4219 BT_INSN(insn
, "<=== (sym)");
4223 static int validate_section(struct objtool_file
*file
, struct section
*sec
)
4225 struct insn_state state
;
4226 struct symbol
*func
;
4229 sec_for_each_sym(sec
, func
) {
4230 if (func
->type
!= STT_FUNC
)
4233 init_insn_state(file
, &state
, sec
);
4234 set_func_state(&state
.cfi
);
4236 warnings
+= validate_symbol(file
, sec
, func
, &state
);
4242 static int validate_noinstr_sections(struct objtool_file
*file
)
4244 struct section
*sec
;
4247 sec
= find_section_by_name(file
->elf
, ".noinstr.text");
4249 warnings
+= validate_section(file
, sec
);
4250 warnings
+= validate_unwind_hints(file
, sec
);
4253 sec
= find_section_by_name(file
->elf
, ".entry.text");
4255 warnings
+= validate_section(file
, sec
);
4256 warnings
+= validate_unwind_hints(file
, sec
);
4259 sec
= find_section_by_name(file
->elf
, ".cpuidle.text");
4261 warnings
+= validate_section(file
, sec
);
4262 warnings
+= validate_unwind_hints(file
, sec
);
4268 static int validate_functions(struct objtool_file
*file
)
4270 struct section
*sec
;
4273 for_each_sec(file
, sec
) {
4274 if (!(sec
->sh
.sh_flags
& SHF_EXECINSTR
))
4277 warnings
+= validate_section(file
, sec
);
4283 static void mark_endbr_used(struct instruction
*insn
)
4285 if (!list_empty(&insn
->call_node
))
4286 list_del_init(&insn
->call_node
);
4289 static bool noendbr_range(struct objtool_file
*file
, struct instruction
*insn
)
4291 struct symbol
*sym
= find_symbol_containing(insn
->sec
, insn
->offset
-1);
4292 struct instruction
*first
;
4297 first
= find_insn(file
, sym
->sec
, sym
->offset
);
4301 if (first
->type
!= INSN_ENDBR
&& !first
->noendbr
)
4304 return insn
->offset
== sym
->offset
+ sym
->len
;
4307 static int __validate_ibt_insn(struct objtool_file
*file
, struct instruction
*insn
,
4308 struct instruction
*dest
)
4310 if (dest
->type
== INSN_ENDBR
) {
4311 mark_endbr_used(dest
);
4315 if (insn_func(dest
) && insn_func(insn
) &&
4316 insn_func(dest
)->pfunc
== insn_func(insn
)->pfunc
) {
4318 * Anything from->to self is either _THIS_IP_ or
4321 * There is no sane way to annotate _THIS_IP_ since the
4322 * compiler treats the relocation as a constant and is
4323 * happy to fold in offsets, skewing any annotation we
4324 * do, leading to vast amounts of false-positives.
4326 * There's also compiler generated _THIS_IP_ through
4327 * KCOV and such which we have no hope of annotating.
4329 * As such, blanket accept self-references without
4336 * Accept anything ANNOTATE_NOENDBR.
4342 * Accept if this is the instruction after a symbol
4343 * that is (no)endbr -- typical code-range usage.
4345 if (noendbr_range(file
, dest
))
4348 WARN_INSN(insn
, "relocation to !ENDBR: %s", offstr(dest
->sec
, dest
->offset
));
4352 static int validate_ibt_insn(struct objtool_file
*file
, struct instruction
*insn
)
4354 struct instruction
*dest
;
4355 struct reloc
*reloc
;
4360 * Looking for function pointer load relocations. Ignore
4361 * direct/indirect branches:
4363 switch (insn
->type
) {
4366 case INSN_CALL_DYNAMIC
:
4367 case INSN_JUMP_CONDITIONAL
:
4368 case INSN_JUMP_UNCONDITIONAL
:
4369 case INSN_JUMP_DYNAMIC
:
4370 case INSN_JUMP_DYNAMIC_CONDITIONAL
:
4376 if (!insn_reloc(file
, insn
)) {
4377 /* local function pointer reference without reloc */
4379 off
= arch_jump_destination(insn
);
4381 dest
= find_insn(file
, insn
->sec
, off
);
4383 WARN_INSN(insn
, "corrupt function pointer reference");
4387 return __validate_ibt_insn(file
, insn
, dest
);
4395 for (reloc
= insn_reloc(file
, insn
);
4397 reloc
= find_reloc_by_dest_range(file
->elf
, insn
->sec
,
4398 reloc_offset(reloc
) + 1,
4399 (insn
->offset
+ insn
->len
) - (reloc_offset(reloc
) + 1))) {
4401 off
= reloc
->sym
->offset
;
4402 if (reloc_type(reloc
) == R_X86_64_PC32
||
4403 reloc_type(reloc
) == R_X86_64_PLT32
)
4404 off
+= arch_dest_reloc_offset(reloc_addend(reloc
));
4406 off
+= reloc_addend(reloc
);
4408 dest
= find_insn(file
, reloc
->sym
->sec
, off
);
4412 warnings
+= __validate_ibt_insn(file
, insn
, dest
);
4418 static int validate_ibt_data_reloc(struct objtool_file
*file
,
4419 struct reloc
*reloc
)
4421 struct instruction
*dest
;
4423 dest
= find_insn(file
, reloc
->sym
->sec
,
4424 reloc
->sym
->offset
+ reloc_addend(reloc
));
4428 if (dest
->type
== INSN_ENDBR
) {
4429 mark_endbr_used(dest
);
4436 WARN_FUNC(reloc
->sec
->base
, reloc_offset(reloc
),
4437 "data relocation to !ENDBR: %s", offstr(dest
->sec
, dest
->offset
));
4443 * Validate IBT rules and remove used ENDBR instructions from the seal list.
4444 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4445 * NOPs) later, in create_ibt_endbr_seal_sections().
4447 static int validate_ibt(struct objtool_file
*file
)
4449 struct section
*sec
;
4450 struct reloc
*reloc
;
4451 struct instruction
*insn
;
4454 for_each_insn(file
, insn
)
4455 warnings
+= validate_ibt_insn(file
, insn
);
4457 for_each_sec(file
, sec
) {
4459 /* Already done by validate_ibt_insn() */
4460 if (sec
->sh
.sh_flags
& SHF_EXECINSTR
)
4467 * These sections can reference text addresses, but not with
4468 * the intent to indirect branch to them.
4470 if ((!strncmp(sec
->name
, ".discard", 8) &&
4471 strcmp(sec
->name
, ".discard.ibt_endbr_noseal")) ||
4472 !strncmp(sec
->name
, ".debug", 6) ||
4473 !strcmp(sec
->name
, ".altinstructions") ||
4474 !strcmp(sec
->name
, ".ibt_endbr_seal") ||
4475 !strcmp(sec
->name
, ".orc_unwind_ip") ||
4476 !strcmp(sec
->name
, ".parainstructions") ||
4477 !strcmp(sec
->name
, ".retpoline_sites") ||
4478 !strcmp(sec
->name
, ".smp_locks") ||
4479 !strcmp(sec
->name
, ".static_call_sites") ||
4480 !strcmp(sec
->name
, "_error_injection_whitelist") ||
4481 !strcmp(sec
->name
, "_kprobe_blacklist") ||
4482 !strcmp(sec
->name
, "__bug_table") ||
4483 !strcmp(sec
->name
, "__ex_table") ||
4484 !strcmp(sec
->name
, "__jump_table") ||
4485 !strcmp(sec
->name
, "__mcount_loc") ||
4486 !strcmp(sec
->name
, ".kcfi_traps") ||
4487 !strcmp(sec
->name
, ".llvm.call-graph-profile") ||
4488 !strcmp(sec
->name
, ".llvm_bb_addr_map") ||
4489 !strcmp(sec
->name
, "__tracepoints") ||
4490 strstr(sec
->name
, "__patchable_function_entries"))
4493 for_each_reloc(sec
->rsec
, reloc
)
4494 warnings
+= validate_ibt_data_reloc(file
, reloc
);
4500 static int validate_sls(struct objtool_file
*file
)
4502 struct instruction
*insn
, *next_insn
;
4505 for_each_insn(file
, insn
) {
4506 next_insn
= next_insn_same_sec(file
, insn
);
4508 if (insn
->retpoline_safe
)
4511 switch (insn
->type
) {
4513 if (!next_insn
|| next_insn
->type
!= INSN_TRAP
) {
4514 WARN_INSN(insn
, "missing int3 after ret");
4519 case INSN_JUMP_DYNAMIC
:
4520 if (!next_insn
|| next_insn
->type
!= INSN_TRAP
) {
4521 WARN_INSN(insn
, "missing int3 after indirect jump");
4533 static int validate_reachable_instructions(struct objtool_file
*file
)
4535 struct instruction
*insn
, *prev_insn
;
4536 struct symbol
*call_dest
;
4539 if (file
->ignore_unreachables
)
4542 for_each_insn(file
, insn
) {
4543 if (insn
->visited
|| ignore_unreachable_insn(file
, insn
))
4546 prev_insn
= prev_insn_same_sec(file
, insn
);
4547 if (prev_insn
&& prev_insn
->dead_end
) {
4548 call_dest
= insn_call_dest(prev_insn
);
4550 WARN_INSN(insn
, "%s() missing __noreturn in .c/.h or NORETURN() in noreturns.h",
4557 WARN_INSN(insn
, "unreachable instruction");
4564 /* 'funcs' is a space-separated list of function names */
4565 static void disas_funcs(const char *funcs
)
4567 const char *objdump_str
, *cross_compile
;
4571 cross_compile
= getenv("CROSS_COMPILE");
4575 objdump_str
= "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
4576 "BEGIN { split(_funcs, funcs); }"
4577 "/^$/ { func_match = 0; }"
4579 "f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
4580 "for (i in funcs) {"
4581 "if (funcs[i] == f) {"
4583 "base = strtonum(\"0x\" $1);"
4590 "addr = strtonum(\"0x\" $1);"
4591 "printf(\"%%04x \", addr - base);"
4596 /* fake snprintf() to calculate the size */
4597 size
= snprintf(NULL
, 0, objdump_str
, cross_compile
, objname
, funcs
) + 1;
4599 WARN("objdump string size calculation failed");
4605 /* real snprintf() */
4606 snprintf(cmd
, size
, objdump_str
, cross_compile
, objname
, funcs
);
4609 WARN("disassembly failed: %d", ret
);
4614 static void disas_warned_funcs(struct objtool_file
*file
)
4617 char *funcs
= NULL
, *tmp
;
4619 for_each_sym(file
, sym
) {
4622 funcs
= malloc(strlen(sym
->name
) + 1);
4624 ERROR_GLIBC("malloc");
4627 strcpy(funcs
, sym
->name
);
4629 tmp
= malloc(strlen(funcs
) + strlen(sym
->name
) + 2);
4631 ERROR_GLIBC("malloc");
4634 sprintf(tmp
, "%s %s", funcs
, sym
->name
);
/* Singly-linked list node tracking one insns allocation to free later. */
struct insn_chunk {
	void *addr;
	struct insn_chunk *next;
};
4651 * Reduce peak RSS usage by freeing insns memory before writing the ELF file,
4652 * which can trigger more allocations for .debug_* sections whose data hasn't
4655 static void free_insns(struct objtool_file
*file
)
4657 struct instruction
*insn
;
4658 struct insn_chunk
*chunks
= NULL
, *chunk
;
4660 for_each_insn(file
, insn
) {
4662 chunk
= malloc(sizeof(*chunk
));
4664 chunk
->next
= chunks
;
4669 for (chunk
= chunks
; chunk
; chunk
= chunk
->next
)
4673 int check(struct objtool_file
*file
)
4675 int ret
= 0, warnings
= 0;
4677 arch_initial_func_cfi_state(&initial_func_cfi
);
4678 init_cfi_state(&init_cfi
);
4679 init_cfi_state(&func_cfi
);
4680 set_func_state(&func_cfi
);
4681 init_cfi_state(&force_undefined_cfi
);
4682 force_undefined_cfi
.force_undefined
= true;
4684 if (!cfi_hash_alloc(1UL << (file
->elf
->symbol_bits
- 3))) {
4689 cfi_hash_add(&init_cfi
);
4690 cfi_hash_add(&func_cfi
);
4692 ret
= decode_sections(file
);
4700 warnings
+= validate_retpoline(file
);
4702 if (opts
.stackval
|| opts
.orc
|| opts
.uaccess
) {
4705 w
+= validate_functions(file
);
4706 w
+= validate_unwind_hints(file
, NULL
);
4708 w
+= validate_reachable_instructions(file
);
4712 } else if (opts
.noinstr
) {
4713 warnings
+= validate_noinstr_sections(file
);
4718 * Must be after validate_branch() and friends, it plays
4719 * further games with insn->visited.
4721 warnings
+= validate_unrets(file
);
4725 warnings
+= validate_ibt(file
);
4728 warnings
+= validate_sls(file
);
4730 if (opts
.static_call
) {
4731 ret
= create_static_call_sections(file
);
4736 if (opts
.retpoline
) {
4737 ret
= create_retpoline_sites_sections(file
);
4743 ret
= create_cfi_sections(file
);
4749 ret
= create_return_sites_sections(file
);
4753 if (opts
.hack_skylake
) {
4754 ret
= create_direct_call_sections(file
);
4761 ret
= create_mcount_loc_sections(file
);
4767 ret
= add_prefix_symbols(file
);
4773 ret
= create_ibt_endbr_seal_sections(file
);
4778 if (opts
.orc
&& nr_insns
) {
4779 ret
= orc_create(file
);
4787 printf("nr_insns_visited: %ld\n", nr_insns_visited
);
4788 printf("nr_cfi: %ld\n", nr_cfi
);
4789 printf("nr_cfi_reused: %ld\n", nr_cfi_reused
);
4790 printf("nr_cfi_cache: %ld\n", nr_cfi_cache
);
4794 if (!ret
&& !warnings
)
4797 if (opts
.werror
&& warnings
)
4801 if (opts
.werror
&& warnings
)
4802 WARN("%d warning(s) upgraded to errors", warnings
);
4804 disas_warned_funcs(file
);