/*
 * tools/objtool/check.c — from the Linux kernel source tree
 * (gitweb scrape header removed; listing was titled
 * "objtool: Get rid of reloc->offset").
 */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10
11 #include <objtool/builtin.h>
12 #include <objtool/cfi.h>
13 #include <objtool/arch.h>
14 #include <objtool/check.h>
15 #include <objtool/special.h>
16 #include <objtool/warn.h>
17 #include <objtool/endianness.h>
18
19 #include <linux/objtool_types.h>
20 #include <linux/hashtable.h>
21 #include <linux/kernel.h>
22 #include <linux/static_call_types.h>
23
/*
 * One entry in a singly-linked list of alternative instruction sequences
 * for an original instruction site.
 */
struct alternative {
	struct alternative *next;	/* next alternative for the same site */
	struct instruction *insn;	/* first instruction of this alternative */
	bool skip_orig;			/* NOTE(review): presumably "don't also
					 * validate the original path" — confirm
					 * at use sites */
};
29
/* Counters for cfi_state allocation/caching (nr_cfi bumped in cfi_alloc()). */
static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

/* Shared CFI state objects reused across the file. */
static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;
static struct cfi_state force_undefined_cfi;
36
37 struct instruction *find_insn(struct objtool_file *file,
38 struct section *sec, unsigned long offset)
39 {
40 struct instruction *insn;
41
42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43 if (insn->sec == sec && insn->offset == offset)
44 return insn;
45 }
46
47 return NULL;
48 }
49
50 struct instruction *next_insn_same_sec(struct objtool_file *file,
51 struct instruction *insn)
52 {
53 if (insn->idx == INSN_CHUNK_MAX)
54 return find_insn(file, insn->sec, insn->offset + insn->len);
55
56 insn++;
57 if (!insn->len)
58 return NULL;
59
60 return insn;
61 }
62
63 static struct instruction *next_insn_same_func(struct objtool_file *file,
64 struct instruction *insn)
65 {
66 struct instruction *next = next_insn_same_sec(file, insn);
67 struct symbol *func = insn_func(insn);
68
69 if (!func)
70 return NULL;
71
72 if (next && insn_func(next) == func)
73 return next;
74
75 /* Check if we're already in the subfunction: */
76 if (func == func->cfunc)
77 return NULL;
78
79 /* Move to the subfunction: */
80 return find_insn(file, func->cfunc->sec, func->cfunc->offset);
81 }
82
83 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
84 struct instruction *insn)
85 {
86 if (insn->idx == 0) {
87 if (insn->prev_len)
88 return find_insn(file, insn->sec, insn->offset - insn->prev_len);
89 return NULL;
90 }
91
92 return insn - 1;
93 }
94
95 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
96 struct instruction *insn)
97 {
98 struct instruction *prev = prev_insn_same_sec(file, insn);
99
100 if (prev && insn_func(prev) == insn_func(insn))
101 return prev;
102
103 return NULL;
104 }
105
/*
 * Iterate over every decoded instruction in every section.  The dummy
 * __fake loop exists only to give __sec a scope wrapping the nested loops.
 */
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file, __sec)				\
			sec_for_each_insn(file, __sec, insn)

/* Iterate over a function's instructions, following into its subfunction. */
#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

/* Iterate over the instructions covered by a symbol (no subfunction hop). */
#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

/* Walk backwards from (not including) insn while still inside sym. */
#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

/* Continue a section walk starting at insn itself. */
#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

/* Continue a section walk starting after insn. */
#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))
133
134 static inline struct symbol *insn_call_dest(struct instruction *insn)
135 {
136 if (insn->type == INSN_JUMP_DYNAMIC ||
137 insn->type == INSN_CALL_DYNAMIC)
138 return NULL;
139
140 return insn->_call_dest;
141 }
142
143 static inline struct reloc *insn_jump_table(struct instruction *insn)
144 {
145 if (insn->type == INSN_JUMP_DYNAMIC ||
146 insn->type == INSN_CALL_DYNAMIC)
147 return insn->_jump_table;
148
149 return NULL;
150 }
151
152 static bool is_jump_table_jump(struct instruction *insn)
153 {
154 struct alt_group *alt_group = insn->alt_group;
155
156 if (insn_jump_table(insn))
157 return true;
158
159 /* Retpoline alternative for a jump table? */
160 return alt_group && alt_group->orig_group &&
161 insn_jump_table(alt_group->orig_group->first_insn);
162 }
163
164 static bool is_sibling_call(struct instruction *insn)
165 {
166 /*
167 * Assume only STT_FUNC calls have jump-tables.
168 */
169 if (insn_func(insn)) {
170 /* An indirect jump is either a sibling call or a jump to a table. */
171 if (insn->type == INSN_JUMP_DYNAMIC)
172 return !is_jump_table_jump(insn);
173 }
174
175 /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
176 return (is_static_jump(insn) && insn_call_dest(insn));
177 }
178
/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/* Known-noreturn global symbols, generated from noreturns.h. */
#define NORETURN(func) __stringify(func),
	static const char * const global_noreturns[] = {
#include "noreturns.h"
	};
#undef NORETURN

	if (!func)
		return false;

	if (func->bind == STB_GLOBAL || func->bind == STB_WEAK)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	/* A weak function not on the list might be overridden elsewhere. */
	if (func->bind == STB_WEAK)
		return false;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	/* Any RET instruction means the function can return. */
	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	/* No instructions at all: cannot conclude it's a dead end. */
	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}
258
259 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
260 {
261 return __dead_end_function(file, func, 0);
262 }
263
264 static void init_cfi_state(struct cfi_state *cfi)
265 {
266 int i;
267
268 for (i = 0; i < CFI_NUM_REGS; i++) {
269 cfi->regs[i].base = CFI_UNDEFINED;
270 cfi->vals[i].base = CFI_UNDEFINED;
271 }
272 cfi->cfa.base = CFI_UNDEFINED;
273 cfi->drap_reg = CFI_UNDEFINED;
274 cfi->drap_offset = -1;
275 }
276
277 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
278 struct section *sec)
279 {
280 memset(state, 0, sizeof(*state));
281 init_cfi_state(&state->cfi);
282
283 /*
284 * We need the full vmlinux for noinstr validation, otherwise we can
285 * not correctly determine insn_call_dest(insn)->sec (external symbols
286 * do not have a section).
287 */
288 if (opts.link && opts.noinstr && sec)
289 state->noinstr = sec->noinstr;
290 }
291
292 static struct cfi_state *cfi_alloc(void)
293 {
294 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
295 if (!cfi) {
296 WARN("calloc failed");
297 exit(1);
298 }
299 nr_cfi++;
300 return cfi;
301 }
302
/* Hash table interning cfi_state objects; cfi_bits is its log2 bucket count. */
static int cfi_bits;
static struct hlist_head *cfi_hash;
305
/*
 * Compare two cfi_state objects, skipping the leading 'hash' hlist_node.
 * memcmp() semantics: returns false (0) when equal, true otherwise, so
 * callers test !cficmp() for equality.
 */
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}
312
313 static inline u32 cfi_key(struct cfi_state *cfi)
314 {
315 return jhash((void *)cfi + sizeof(cfi->hash),
316 sizeof(*cfi) - sizeof(cfi->hash), 0);
317 }
318
319 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
320 {
321 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
322 struct cfi_state *obj;
323
324 hlist_for_each_entry(obj, head, hash) {
325 if (!cficmp(cfi, obj)) {
326 nr_cfi_cache++;
327 return obj;
328 }
329 }
330
331 obj = cfi_alloc();
332 *obj = *cfi;
333 hlist_add_head(&obj->hash, head);
334
335 return obj;
336 }
337
338 static void cfi_hash_add(struct cfi_state *cfi)
339 {
340 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
341
342 hlist_add_head(&cfi->hash, head);
343 }
344
345 static void *cfi_hash_alloc(unsigned long size)
346 {
347 cfi_bits = max(10, ilog2(size));
348 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
349 PROT_READ|PROT_WRITE,
350 MAP_PRIVATE|MAP_ANON, -1, 0);
351 if (cfi_hash == (void *)-1L) {
352 WARN("mmap fail cfi_hash");
353 cfi_hash = NULL;
354 } else if (opts.stats) {
355 printf("cfi_bits: %d\n", cfi_bits);
356 }
357
358 return cfi_hash;
359 }
360
/* Decoder/validation counters; nr_insns is printed with --stats. */
static unsigned long nr_insns;
static unsigned long nr_insns_visited;
363
/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {
		/* Instructions are allocated in chunks of INSN_CHUNK_SIZE. */
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		/* Alternative-replacement and .discard sections aren't "real" text. */
		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		/* Sections whose code must stay free of instrumentation. */
		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		/*
		 * .init.text code is ran before userspace and thus doesn't
		 * strictly need retpolines, except for modules which are
		 * loaded late, they very much do need retpoline in their
		 * .init.text
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			/* Start a fresh chunk when the current one is full. */
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
				if (!insns) {
					WARN("malloc failed");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      insn);
			if (ret)
				return ret;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

//		printf("%s: last chunk used: %d\n", sec->name, (int)idx);

		/* Attach each function/notype symbol to its instructions. */
		sec_for_each_sym(sec, func) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				WARN("%s(): STT_FUNC at end of section",
				     func->name);
				return -1;
			}

			if (func->return_thunk || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				/* Track function-entry vs interior ENDBR instructions. */
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}
488
/*
 * Read the pv_ops[] .data table to find the static initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *reloc;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	/* Scan every relocation within the table symbol's data range. */
	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		reloc = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!reloc)
			break;

		/* Resolve section-relative relocs to the function they reference. */
		func = reloc->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(reloc->sym->sec, reloc->addend);
		/* NOTE(review): func may be NULL if no symbol sits at that
		 * offset — presumably tolerated by objtool_pv_add(); confirm. */

		/* Slot index of this entry within the pointer table. */
		idx = (reloc_offset(reloc) - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		/* Continue searching just past this reloc. */
		off = reloc_offset(reloc) + 1;
		if (off > end)
			break;
	}

	return 0;
}
525
526 /*
527 * Allocate and initialize file->pv_ops[].
528 */
529 static int init_pv_ops(struct objtool_file *file)
530 {
531 static const char *pv_ops_tables[] = {
532 "pv_ops",
533 "xen_cpu_ops",
534 "xen_irq_ops",
535 "xen_mmu_ops",
536 NULL,
537 };
538 const char *pv_ops;
539 struct symbol *sym;
540 int idx, nr;
541
542 if (!opts.noinstr)
543 return 0;
544
545 file->pv_ops = NULL;
546
547 sym = find_symbol_by_name(file->elf, "pv_ops");
548 if (!sym)
549 return 0;
550
551 nr = sym->len / sizeof(unsigned long);
552 file->pv_ops = calloc(sizeof(struct pv_state), nr);
553 if (!file->pv_ops)
554 return -1;
555
556 for (idx = 0; idx < nr; idx++)
557 INIT_LIST_HEAD(&file->pv_ops[idx].targets);
558
559 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
560 add_pv_ops(file, pv_ops);
561
562 return 0;
563 }
564
565 static struct instruction *find_last_insn(struct objtool_file *file,
566 struct section *sec)
567 {
568 struct instruction *insn = NULL;
569 unsigned int offset;
570 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
571
572 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
573 insn = find_insn(file, sec, offset);
574
575 return insn;
576 }
577
578 /*
579 * Mark "ud2" instructions and manually annotated dead ends.
580 */
581 static int add_dead_ends(struct objtool_file *file)
582 {
583 struct section *rsec;
584 struct reloc *reloc;
585 struct instruction *insn;
586
587 /*
588 * Check for manually annotated dead ends.
589 */
590 rsec = find_section_by_name(file->elf, ".rela.discard.unreachable");
591 if (!rsec)
592 goto reachable;
593
594 for_each_reloc(rsec, reloc) {
595 if (reloc->sym->type != STT_SECTION) {
596 WARN("unexpected relocation symbol type in %s", rsec->name);
597 return -1;
598 }
599 insn = find_insn(file, reloc->sym->sec, reloc->addend);
600 if (insn)
601 insn = prev_insn_same_sec(file, insn);
602 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
603 insn = find_last_insn(file, reloc->sym->sec);
604 if (!insn) {
605 WARN("can't find unreachable insn at %s+0x%" PRIx64,
606 reloc->sym->sec->name, reloc->addend);
607 return -1;
608 }
609 } else {
610 WARN("can't find unreachable insn at %s+0x%" PRIx64,
611 reloc->sym->sec->name, reloc->addend);
612 return -1;
613 }
614
615 insn->dead_end = true;
616 }
617
618 reachable:
619 /*
620 * These manually annotated reachable checks are needed for GCC 4.4,
621 * where the Linux unreachable() macro isn't supported. In that case
622 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
623 * not a dead end.
624 */
625 rsec = find_section_by_name(file->elf, ".rela.discard.reachable");
626 if (!rsec)
627 return 0;
628
629 for_each_reloc(rsec, reloc) {
630 if (reloc->sym->type != STT_SECTION) {
631 WARN("unexpected relocation symbol type in %s", rsec->name);
632 return -1;
633 }
634 insn = find_insn(file, reloc->sym->sec, reloc->addend);
635 if (insn)
636 insn = prev_insn_same_sec(file, insn);
637 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
638 insn = find_last_insn(file, reloc->sym->sec);
639 if (!insn) {
640 WARN("can't find reachable insn at %s+0x%" PRIx64,
641 reloc->sym->sec->name, reloc->addend);
642 return -1;
643 }
644 } else {
645 WARN("can't find reachable insn at %s+0x%" PRIx64,
646 reloc->sym->sec->name, reloc->addend);
647 return -1;
648 }
649
650 insn->dead_end = false;
651 }
652
653 return 0;
654 }
655
/*
 * Emit the .static_call_sites section: one static_call_site entry per
 * collected static call, with an 'addr' reloc to the call instruction and
 * a 'key' reloc to the corresponding static_call_key symbol.
 */
static int create_static_call_sections(struct objtool_file *file)
{
	struct static_call_site *site;
	struct section *sec;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	/* Two relocs per site: one for 'addr', one for 'key'. */
	sec = elf_create_section_pair(file->elf, ".static_call_sites",
				      sizeof(*site), idx, idx * 2);
	if (!sec)
		return -1;

	/* Allow modules to modify the low bits of static_call_site::key */
	sec->sh.sh_flags |= SHF_WRITE;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		/* populate reloc for 'addr' */
		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(*site), idx * 2,
					     insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			free(key_name);
			return -1;
		}
		/*
		 * Derive the key symbol name by overwriting the trampoline
		 * prefix with the key prefix in place (prefix strings come
		 * from static_call_types.h).
		 */
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				free(key_name);
				return -1;
			}

			/*
			 * For modules(), the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (!elf_init_reloc_data_sym(file->elf, sec,
					     idx * sizeof(*site) + 4,
					     (idx * 2) + 1, key_sym,
					     is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}
744
745 static int create_retpoline_sites_sections(struct objtool_file *file)
746 {
747 struct instruction *insn;
748 struct section *sec;
749 int idx;
750
751 sec = find_section_by_name(file->elf, ".retpoline_sites");
752 if (sec) {
753 WARN("file already has .retpoline_sites, skipping");
754 return 0;
755 }
756
757 idx = 0;
758 list_for_each_entry(insn, &file->retpoline_call_list, call_node)
759 idx++;
760
761 if (!idx)
762 return 0;
763
764 sec = elf_create_section_pair(file->elf, ".retpoline_sites",
765 sizeof(int), idx, idx);
766 if (!sec)
767 return -1;
768
769 idx = 0;
770 list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
771
772 if (!elf_init_reloc_text_sym(file->elf, sec,
773 idx * sizeof(int), idx,
774 insn->sec, insn->offset))
775 return -1;
776
777 idx++;
778 }
779
780 return 0;
781 }
782
783 static int create_return_sites_sections(struct objtool_file *file)
784 {
785 struct instruction *insn;
786 struct section *sec;
787 int idx;
788
789 sec = find_section_by_name(file->elf, ".return_sites");
790 if (sec) {
791 WARN("file already has .return_sites, skipping");
792 return 0;
793 }
794
795 idx = 0;
796 list_for_each_entry(insn, &file->return_thunk_list, call_node)
797 idx++;
798
799 if (!idx)
800 return 0;
801
802 sec = elf_create_section_pair(file->elf, ".return_sites",
803 sizeof(int), idx, idx);
804 if (!sec)
805 return -1;
806
807 idx = 0;
808 list_for_each_entry(insn, &file->return_thunk_list, call_node) {
809
810 if (!elf_init_reloc_text_sym(file->elf, sec,
811 idx * sizeof(int), idx,
812 insn->sec, insn->offset))
813 return -1;
814
815 idx++;
816 }
817
818 return 0;
819 }
820
/*
 * Emit .ibt_endbr_seal: one int slot per superfluous ENDBR instruction,
 * each with a text reloc pointing at it.
 */
static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	/* Count the ENDBRs queued for sealing. */
	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section_pair(file->elf, ".ibt_endbr_seal",
				      sizeof(int), idx, idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		/*
		 * NOTE(review): warns because module init/exit entry points
		 * appear to need to stay indirectly callable — confirm
		 * against the module loader.
		 */
		if (opts.module && sym && sym->type == STT_FUNC &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module")))
			WARN("%s(): not an indirect call target", sym->name);

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(int), idx,
					     insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}
874
/*
 * Emit .cfi_sites: one unsigned int slot per "__cfi_" preamble symbol,
 * each with a text reloc pointing at the symbol.
 */
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		/*
		 * NOTE(review): resetting call_list here looks copy-pasted
		 * from create_direct_call_sections() — confirm intentional.
		 */
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	/* First pass: count the __cfi_ symbols. */
	idx = 0;
	for_each_sym(file, sym) {
		if (sym->type != STT_FUNC)
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		idx++;
	}

	/*
	 * Note: unlike the sibling create_* helpers, an empty section is
	 * still created when idx == 0.
	 */
	sec = elf_create_section_pair(file->elf, ".cfi_sites",
				      sizeof(unsigned int), idx, idx);
	if (!sec)
		return -1;

	/* Second pass: emit one text reloc per __cfi_ symbol. */
	idx = 0;
	for_each_sym(file, sym) {
		if (sym->type != STT_FUNC)
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		if (!elf_init_reloc_text_sym(file->elf, sec,
					     idx * sizeof(unsigned int), idx,
					     sym->sec, sym->offset))
			return -1;

		idx++;
	}

	return 0;
}
922
923 static int create_mcount_loc_sections(struct objtool_file *file)
924 {
925 size_t addr_size = elf_addr_size(file->elf);
926 struct instruction *insn;
927 struct section *sec;
928 int idx;
929
930 sec = find_section_by_name(file->elf, "__mcount_loc");
931 if (sec) {
932 INIT_LIST_HEAD(&file->mcount_loc_list);
933 WARN("file already has __mcount_loc section, skipping");
934 return 0;
935 }
936
937 if (list_empty(&file->mcount_loc_list))
938 return 0;
939
940 idx = 0;
941 list_for_each_entry(insn, &file->mcount_loc_list, call_node)
942 idx++;
943
944 sec = elf_create_section_pair(file->elf, "__mcount_loc", addr_size,
945 idx, idx);
946 if (!sec)
947 return -1;
948
949 sec->sh.sh_addralign = addr_size;
950
951 idx = 0;
952 list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
953
954 struct reloc *reloc;
955
956 reloc = elf_init_reloc_text_sym(file->elf, sec, idx * addr_size, idx,
957 insn->sec, insn->offset);
958 if (!reloc)
959 return -1;
960
961 reloc->type = addr_size == 8 ? R_ABS64 : R_ABS32;
962
963 idx++;
964 }
965
966 return 0;
967 }
968
969 static int create_direct_call_sections(struct objtool_file *file)
970 {
971 struct instruction *insn;
972 struct section *sec;
973 int idx;
974
975 sec = find_section_by_name(file->elf, ".call_sites");
976 if (sec) {
977 INIT_LIST_HEAD(&file->call_list);
978 WARN("file already has .call_sites section, skipping");
979 return 0;
980 }
981
982 if (list_empty(&file->call_list))
983 return 0;
984
985 idx = 0;
986 list_for_each_entry(insn, &file->call_list, call_node)
987 idx++;
988
989 sec = elf_create_section_pair(file->elf, ".call_sites",
990 sizeof(unsigned int), idx, idx);
991 if (!sec)
992 return -1;
993
994 idx = 0;
995 list_for_each_entry(insn, &file->call_list, call_node) {
996
997 if (!elf_init_reloc_text_sym(file->elf, sec,
998 idx * sizeof(unsigned int), idx,
999 insn->sec, insn->offset))
1000 return -1;
1001
1002 idx++;
1003 }
1004
1005 return 0;
1006 }
1007
1008 /*
1009 * Warnings shouldn't be reported for ignored functions.
1010 */
1011 static void add_ignores(struct objtool_file *file)
1012 {
1013 struct instruction *insn;
1014 struct section *rsec;
1015 struct symbol *func;
1016 struct reloc *reloc;
1017
1018 rsec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1019 if (!rsec)
1020 return;
1021
1022 for_each_reloc(rsec, reloc) {
1023 switch (reloc->sym->type) {
1024 case STT_FUNC:
1025 func = reloc->sym;
1026 break;
1027
1028 case STT_SECTION:
1029 func = find_func_by_offset(reloc->sym->sec, reloc->addend);
1030 if (!func)
1031 continue;
1032 break;
1033
1034 default:
1035 WARN("unexpected relocation symbol type in %s: %d",
1036 rsec->name, reloc->sym->type);
1037 continue;
1038 }
1039
1040 func_for_each_insn(file, func, insn)
1041 insn->ignore = true;
1042 }
1043 }
1044
/*
 * This is a whitelist of functions that is allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 *
 * The array is NULL-terminated; see add_uaccess_safe().
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* STACKLEAK */
	"stackleak_track_stack",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	"rep_stos_alternative",
	"rep_movs_alternative",
	"__copy_user_nocache",
	NULL
};
1229
1230 static void add_uaccess_safe(struct objtool_file *file)
1231 {
1232 struct symbol *func;
1233 const char **name;
1234
1235 if (!opts.uaccess)
1236 return;
1237
1238 for (name = uaccess_safe_builtin; *name; name++) {
1239 func = find_symbol_by_name(file->elf, *name);
1240 if (!func)
1241 continue;
1242
1243 func->uaccess_safe = true;
1244 }
1245 }
1246
1247 /*
1248 * FIXME: For now, just ignore any alternatives which add retpolines. This is
1249 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
1250 * But it at least allows objtool to understand the control flow *around* the
1251 * retpoline.
1252 */
1253 static int add_ignore_alternatives(struct objtool_file *file)
1254 {
1255 struct section *rsec;
1256 struct reloc *reloc;
1257 struct instruction *insn;
1258
1259 rsec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
1260 if (!rsec)
1261 return 0;
1262
1263 for_each_reloc(rsec, reloc) {
1264 if (reloc->sym->type != STT_SECTION) {
1265 WARN("unexpected relocation symbol type in %s", rsec->name);
1266 return -1;
1267 }
1268
1269 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1270 if (!insn) {
1271 WARN("bad .discard.ignore_alts entry");
1272 return -1;
1273 }
1274
1275 insn->ignore_alts = true;
1276 }
1277
1278 return 0;
1279 }
1280
/* Weak stub: arches with retpoline thunk symbols override this. */
__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}
1285
/* Weak stub: arches with return thunk symbols override this. */
__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}
1290
1291 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1292 {
1293 struct reloc *reloc;
1294
1295 if (insn->no_reloc)
1296 return NULL;
1297
1298 if (!file)
1299 return NULL;
1300
1301 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1302 insn->offset, insn->len);
1303 if (!reloc) {
1304 insn->no_reloc = 1;
1305 return NULL;
1306 }
1307
1308 return reloc;
1309 }
1310
1311 static void remove_insn_ops(struct instruction *insn)
1312 {
1313 struct stack_op *op, *next;
1314
1315 for (op = insn->stack_ops; op; op = next) {
1316 next = op->next;
1317 free(op);
1318 }
1319 insn->stack_ops = NULL;
1320 }
1321
/*
 * Record per-site metadata for a (sibling) call instruction: queue it on
 * the matching list (static call / retpoline / mcount / regular call) and,
 * where the relevant option is enabled, rewrite the instruction bytes in
 * the ELF itself (noinstr and mcount NOP-out).
 */
static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	/*
	 * No resolved destination: fall back to the reloc's symbol.
	 * NOTE(review): assumes a reloc exists whenever the dest is NULL —
	 * confirm with callers.
	 */
	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a function
	 * attribute so they need a little help, NOP out any such calls from
	 * noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		/* Kill the relocation first so the patched bytes stay put. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
			               : arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn by two new
			 * insn: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			/* Same dance as above: drop the reloc, then NOP it. */
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (insn->type == INSN_CALL && !insn->sec->init)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}
1407
/*
 * Set @insn's call destination to @dest and annotate the call site.
 * A NULL @dest just clears any previously recorded destination.
 */
static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}
1426
1427 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1428 {
1429 /*
1430 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1431 * so convert them accordingly.
1432 */
1433 switch (insn->type) {
1434 case INSN_CALL:
1435 insn->type = INSN_CALL_DYNAMIC;
1436 break;
1437 case INSN_JUMP_UNCONDITIONAL:
1438 insn->type = INSN_JUMP_DYNAMIC;
1439 break;
1440 case INSN_JUMP_CONDITIONAL:
1441 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1442 break;
1443 default:
1444 return;
1445 }
1446
1447 insn->retpoline_safe = true;
1448
1449 /*
1450 * Whatever stack impact regular CALLs have, should be undone
1451 * by the RETURN of the called function.
1452 *
1453 * Annotated intra-function calls retain the stack_ops but
1454 * are converted to JUMP, see read_intra_function_calls().
1455 */
1456 remove_insn_ops(insn);
1457
1458 annotate_call_site(file, insn, false);
1459 }
1460
1461 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1462 {
1463 /*
1464 * Return thunk tail calls are really just returns in disguise,
1465 * so convert them accordingly.
1466 */
1467 insn->type = INSN_RETURN;
1468 insn->retpoline_safe = true;
1469
1470 if (add)
1471 list_add_tail(&insn->call_node, &file->return_thunk_list);
1472 }
1473
1474 static bool is_first_func_insn(struct objtool_file *file,
1475 struct instruction *insn, struct symbol *sym)
1476 {
1477 if (insn->offset == sym->offset)
1478 return true;
1479
1480 /* Allow direct CALL/JMP past ENDBR */
1481 if (opts.ibt) {
1482 struct instruction *prev = prev_insn_same_sym(file, insn);
1483
1484 if (prev && prev->type == INSN_ENDBR &&
1485 insn->offset == sym->offset + prev->len)
1486 return true;
1487 }
1488
1489 return false;
1490 }
1491
1492 /*
1493 * A sibling call is a tail-call to another symbol -- to differentiate from a
1494 * recursive tail-call which is to the same symbol.
1495 */
1496 static bool jump_is_sibling_call(struct objtool_file *file,
1497 struct instruction *from, struct instruction *to)
1498 {
1499 struct symbol *fs = from->sym;
1500 struct symbol *ts = to->sym;
1501
1502 /* Not a sibling call if from/to a symbol hole */
1503 if (!fs || !ts)
1504 return false;
1505
1506 /* Not a sibling call if not targeting the start of a symbol. */
1507 if (!is_first_func_insn(file, to, ts))
1508 return false;
1509
1510 /* Disallow sibling calls into STT_NOTYPE */
1511 if (ts->type == STT_NOTYPE)
1512 return false;
1513
1514 /* Must not be self to be a sibling */
1515 return fs->pfunc != ts->pfunc;
1516 }
1517
/*
 * Find the destination instructions for all jumps.
 *
 * For each static jump, resolve the target section+offset (directly from
 * the immediate, or via the reloc), classify retpoline/return-thunk and
 * sibling-call cases, and link insn->jump_dest.  Returns 0 on success,
 * -1 on an unresolvable jump target.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Intra-section jump: target comes from the immediate. */
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn_func(insn)) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for zen_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction. Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->return_thunk) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
				  dest_sec->name, dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn_func(insn) && insn_func(jump_dest) &&
		    insn_func(insn) != insn_func(jump_dest)) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn_func(insn)->name, ".cold") &&
			    strstr(insn_func(jump_dest)->name, ".cold")) {
				insn_func(insn)->cfunc = insn_func(jump_dest);
				insn_func(jump_dest)->pfunc = insn_func(insn);
			}
		}

		if (jump_is_sibling_call(file, insn, jump_dest)) {
			/*
			 * Internal sibling call without reloc or with
			 * STT_SECTION reloc.
			 */
			add_call_dest(file, insn, insn_func(jump_dest), true);
			continue;
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}
1632
/*
 * Resolve the symbol at @sec+@offset, preferring an STT_FUNC symbol and
 * falling back to any symbol covering the offset.
 */
static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *sym = find_func_by_offset(sec, offset);

	return sym ? sym : find_symbol_by_offset(sec, offset);
}
1643
/*
 * Find the destination instructions for all calls.
 *
 * Direct calls without a reloc must target a known function (or carry an
 * intra-function-call annotation); relocated calls are resolved through
 * the reloc symbol.  Returns 0 on success, -1 on error.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			/* Reloc-less direct call: target from the immediate. */
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				WARN_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
				WARN_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			/* Section-relative reloc: compute the offset manually. */
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					  reloc->sym->sec->name, dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}
1698
/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 *
 * Builds (or reuses) the alt_group for the original instruction range,
 * builds a matching alt_group for the replacement range (padding with a
 * fake NOP when the replacement is shorter), and wires jumps past the end
 * of the replacement back to the instruction following the original group.
 * Returns 0 on success, -1 on error.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = orig_insn->alt_group;
	if (!orig_alt_group) {
		/* First alternative for this original range: build the group. */
		struct instruction *last_orig_insn = NULL;

		orig_alt_group = malloc(sizeof(*orig_alt_group));
		if (!orig_alt_group) {
			WARN("malloc failed");
			return -1;
		}
		/* One CFI slot per byte of the original range. */
		orig_alt_group->cfi = calloc(special_alt->orig_len,
					     sizeof(struct cfi_state *));
		if (!orig_alt_group->cfi) {
			WARN("calloc failed");
			return -1;
		}

		insn = orig_insn;
		sec_for_each_insn_from(file, insn) {
			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
				break;

			insn->alt_group = orig_alt_group;
			last_orig_insn = insn;
		}
		orig_alt_group->orig_group = NULL;
		orig_alt_group->first_insn = orig_insn;
		orig_alt_group->last_insn = last_orig_insn;
		orig_alt_group->nop = NULL;
	} else {
		/* Reused group: its extent must match this alt entry exactly. */
		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
			WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
				  orig_alt_group->last_insn->offset +
				  orig_alt_group->last_insn->len -
				  orig_alt_group->first_insn->offset,
				  special_alt->orig_len);
			return -1;
		}
	}

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original. This is needed to
		 * allow propagate_alt_cfi() to do its magic. When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->sym = orig_insn->sym;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		/* Empty replacement: the fake nop is the whole new group. */
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->sym = orig_insn->sym;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_INSN(insn, "unsupported relocation in alternatives section");
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		/* A jump just past the replacement continues after the group. */
		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
			if (!insn->jump_dest) {
				WARN_INSN(insn, "can't find alternative jump destination");
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = last_new_insn;
	new_alt_group->nop = nop;
	/* Both groups share one CFI array keyed by byte offset. */
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}
1846
/*
 * A jump table entry can either convert a nop to a jump or a jump to a nop.
 * If the original instruction is a jump, make the alt entry an effective nop
 * by just skipping the original instruction.
 *
 * With --hack-jump-label and a "likely" key (bit 1 of key_addend), the JMP
 * is rewritten to a NOP in the ELF itself.  Short/long NOP/JMP counters
 * feed the --stats output.  Returns 0 on success, -1 on error.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		/* Kill the relocation before patching the bytes. */
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	/* JMP original: the "alternative" is simply the next instruction. */
	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}
1894
/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 *
 * Takes ownership of the special_alt entries returned by special_get_alts()
 * and frees each one after converting it to a struct alternative.
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		/* Group alts may legitimately have an empty replacement. */
		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		/* Push onto the front of orig_insn's alternatives list. */
		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}
1980
/*
 * Walk the jump table starting at reloc @table and add each target
 * instruction as an alternative on the indirect jump @insn.
 *
 * The walk stops at the start of the next table, at a non-consecutive
 * entry, at a function pointer into the parent function, or at a target
 * outside the parent function.  Returns 0 on success, -1 on error or if
 * no entry was consumed.
 */
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn_func(insn)->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	for_each_reloc_from(table->sec, reloc) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc_offset(reloc) != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
		prev_offset = reloc_offset(reloc);
	}

	/* prev_offset == 0 means not a single valid entry was found. */
	if (!prev_offset) {
		WARN_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}
2036
/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 *
 * Scans backwards from @insn through @func, following first_jump_src
 * shortcuts, looking for the table-setup instruction; returns its table
 * reloc, or NULL if none is found.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

	/*
	 * Backward search using the @first_jump_src links, these help avoid
	 * much of the 'in between' code. Which avoids us getting confused by
	 * it.
	 */
	for (;
	     insn && insn_func(insn) && insn_func(insn)->pfunc == func;
	     insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {

		/* A second dynamic jump means a different switch; stop. */
		if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
			break;

		/* allow small jumps within the range */
		if (insn->type == INSN_JUMP_UNCONDITIONAL &&
		    insn->jump_dest &&
		    (insn->jump_dest->offset <= insn->offset ||
		     insn->jump_dest->offset > orig_insn->offset))
			break;

		/* The table must point back into this function. */
		table_reloc = arch_find_switch_table(file, insn);
		if (!table_reloc)
			continue;
		dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
		if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
			continue;

		return table_reloc;
	}

	return NULL;
}
2079
/*
 * First pass: Mark the head of each jump table so that in the next pass,
 * we know when a given jump table ends and the next one starts.
 *
 * Also records forward-jump back-pointers (first_jump_src) used by
 * find_jump_table() to skip over unrelated code during its backward scan.
 */
static void mark_func_jump_tables(struct objtool_file *file,
				  struct symbol *func)
{
	struct instruction *insn, *last = NULL;
	struct reloc *reloc;

	func_for_each_insn(file, func, insn) {
		if (!last)
			last = insn;

		/*
		 * Store back-pointers for unconditional forward jumps such
		 * that find_jump_table() can back-track using those and
		 * avoid some potentially confusing code.
		 */
		if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
		    insn->offset > last->offset &&
		    insn->jump_dest->offset > insn->offset &&
		    !insn->jump_dest->first_jump_src) {

			insn->jump_dest->first_jump_src = insn;
			last = insn->jump_dest;
		}

		if (insn->type != INSN_JUMP_DYNAMIC)
			continue;

		/* Found a switch jump: remember where its table starts. */
		reloc = find_jump_table(file, func, insn);
		if (reloc) {
			reloc->jump_table_start = true;
			insn->_jump_table = reloc;
		}
	}
}
2118
2119 static int add_func_jump_tables(struct objtool_file *file,
2120 struct symbol *func)
2121 {
2122 struct instruction *insn;
2123 int ret;
2124
2125 func_for_each_insn(file, func, insn) {
2126 if (!insn_jump_table(insn))
2127 continue;
2128
2129 ret = add_jump_table(file, insn, insn_jump_table(insn));
2130 if (ret)
2131 return ret;
2132 }
2133
2134 return 0;
2135 }
2136
2137 /*
2138 * For some switch statements, gcc generates a jump table in the .rodata
2139 * section which contains a list of addresses within the function to jump to.
2140 * This finds these jump tables and adds them to the insn->alts lists.
2141 */
2142 static int add_jump_table_alts(struct objtool_file *file)
2143 {
2144 struct symbol *func;
2145 int ret;
2146
2147 if (!file->rodata)
2148 return 0;
2149
2150 for_each_sym(file, func) {
2151 if (func->type != STT_FUNC)
2152 continue;
2153
2154 mark_func_jump_tables(file, func);
2155 ret = add_func_jump_tables(file, func);
2156 if (ret)
2157 return ret;
2158 }
2159
2160 return 0;
2161 }
2162
2163 static void set_func_state(struct cfi_state *state)
2164 {
2165 state->cfa = initial_func_cfi.cfa;
2166 memcpy(&state->regs, &initial_func_cfi.regs,
2167 CFI_NUM_REGS * sizeof(struct cfi_reg));
2168 state->stack_size = initial_func_cfi.cfa.offset;
2169 state->type = UNWIND_HINT_TYPE_CALL;
2170 }
2171
/*
 * Parse the .discard.unwind_hints section: for each hint, find the
 * annotated instruction and either attach a canned CFI state (UNDEFINED /
 * FUNC), set save/restore flags, or build and intern a CFI state from the
 * hint's sp_reg/sp_offset.  Returns 0 on success, -1 on error.
 */
static int read_unwind_hints(struct objtool_file *file)
{
	struct cfi_state cfi = init_cfi;
	struct section *sec;
	struct unwind_hint *hint;
	struct instruction *insn;
	struct reloc *reloc;
	int i;

	sec = find_section_by_name(file->elf, ".discard.unwind_hints");
	if (!sec)
		return 0;

	if (!sec->rsec) {
		WARN("missing .rela.discard.unwind_hints section");
		return -1;
	}

	if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
		WARN("struct unwind_hint size mismatch");
		return -1;
	}

	file->hints = true;

	for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
		hint = (struct unwind_hint *)sec->data->d_buf + i;

		/* Each hint has one reloc locating the annotated insn. */
		reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
		if (!reloc) {
			WARN("can't find reloc for unwind_hints[%d]", i);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("can't find insn for unwind_hints[%d]", i);
			return -1;
		}

		insn->hint = true;

		if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) {
			insn->cfi = &force_undefined_cfi;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_SAVE) {
			insn->hint = false;
			insn->save = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
			insn->restore = true;
			continue;
		}

		if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
			struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);

			/* Global entry points need a leading ENDBR with IBT. */
			if (sym && sym->bind == STB_GLOBAL) {
				if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
					WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
				}
			}
		}

		if (hint->type == UNWIND_HINT_TYPE_FUNC) {
			insn->cfi = &func_cfi;
			continue;
		}

		/* Start from the insn's existing CFI state, if any. */
		if (insn->cfi)
			cfi = *(insn->cfi);

		if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
			WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
			return -1;
		}

		cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
		cfi.type = hint->type;
		cfi.signal = hint->signal;

		insn->cfi = cfi_hash_find_or_add(&cfi);
	}

	return 0;
}
2262
2263 static int read_noendbr_hints(struct objtool_file *file)
2264 {
2265 struct instruction *insn;
2266 struct section *rsec;
2267 struct reloc *reloc;
2268
2269 rsec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2270 if (!rsec)
2271 return 0;
2272
2273 for_each_reloc(rsec, reloc) {
2274 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2275 if (!insn) {
2276 WARN("bad .discard.noendbr entry");
2277 return -1;
2278 }
2279
2280 insn->noendbr = 1;
2281 }
2282
2283 return 0;
2284 }
2285
2286 static int read_retpoline_hints(struct objtool_file *file)
2287 {
2288 struct section *rsec;
2289 struct instruction *insn;
2290 struct reloc *reloc;
2291
2292 rsec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2293 if (!rsec)
2294 return 0;
2295
2296 for_each_reloc(rsec, reloc) {
2297 if (reloc->sym->type != STT_SECTION) {
2298 WARN("unexpected relocation symbol type in %s", rsec->name);
2299 return -1;
2300 }
2301
2302 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2303 if (!insn) {
2304 WARN("bad .discard.retpoline_safe entry");
2305 return -1;
2306 }
2307
2308 if (insn->type != INSN_JUMP_DYNAMIC &&
2309 insn->type != INSN_CALL_DYNAMIC &&
2310 insn->type != INSN_RETURN &&
2311 insn->type != INSN_NOP) {
2312 WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2313 return -1;
2314 }
2315
2316 insn->retpoline_safe = true;
2317 }
2318
2319 return 0;
2320 }
2321
2322 static int read_instr_hints(struct objtool_file *file)
2323 {
2324 struct section *rsec;
2325 struct instruction *insn;
2326 struct reloc *reloc;
2327
2328 rsec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2329 if (!rsec)
2330 return 0;
2331
2332 for_each_reloc(rsec, reloc) {
2333 if (reloc->sym->type != STT_SECTION) {
2334 WARN("unexpected relocation symbol type in %s", rsec->name);
2335 return -1;
2336 }
2337
2338 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2339 if (!insn) {
2340 WARN("bad .discard.instr_end entry");
2341 return -1;
2342 }
2343
2344 insn->instr--;
2345 }
2346
2347 rsec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2348 if (!rsec)
2349 return 0;
2350
2351 for_each_reloc(rsec, reloc) {
2352 if (reloc->sym->type != STT_SECTION) {
2353 WARN("unexpected relocation symbol type in %s", rsec->name);
2354 return -1;
2355 }
2356
2357 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2358 if (!insn) {
2359 WARN("bad .discard.instr_begin entry");
2360 return -1;
2361 }
2362
2363 insn->instr++;
2364 }
2365
2366 return 0;
2367 }
2368
2369 static int read_validate_unret_hints(struct objtool_file *file)
2370 {
2371 struct section *rsec;
2372 struct instruction *insn;
2373 struct reloc *reloc;
2374
2375 rsec = find_section_by_name(file->elf, ".rela.discard.validate_unret");
2376 if (!rsec)
2377 return 0;
2378
2379 for_each_reloc(rsec, reloc) {
2380 if (reloc->sym->type != STT_SECTION) {
2381 WARN("unexpected relocation symbol type in %s", rsec->name);
2382 return -1;
2383 }
2384
2385 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2386 if (!insn) {
2387 WARN("bad .discard.instr_end entry");
2388 return -1;
2389 }
2390 insn->unret = 1;
2391 }
2392
2393 return 0;
2394 }
2395
2396
2397 static int read_intra_function_calls(struct objtool_file *file)
2398 {
2399 struct instruction *insn;
2400 struct section *rsec;
2401 struct reloc *reloc;
2402
2403 rsec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2404 if (!rsec)
2405 return 0;
2406
2407 for_each_reloc(rsec, reloc) {
2408 unsigned long dest_off;
2409
2410 if (reloc->sym->type != STT_SECTION) {
2411 WARN("unexpected relocation symbol type in %s",
2412 rsec->name);
2413 return -1;
2414 }
2415
2416 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2417 if (!insn) {
2418 WARN("bad .discard.intra_function_call entry");
2419 return -1;
2420 }
2421
2422 if (insn->type != INSN_CALL) {
2423 WARN_INSN(insn, "intra_function_call not a direct call");
2424 return -1;
2425 }
2426
2427 /*
2428 * Treat intra-function CALLs as JMPs, but with a stack_op.
2429 * See add_call_destinations(), which strips stack_ops from
2430 * normal CALLs.
2431 */
2432 insn->type = INSN_JUMP_UNCONDITIONAL;
2433
2434 dest_off = arch_jump_destination(insn);
2435 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2436 if (!insn->jump_dest) {
2437 WARN_INSN(insn, "can't find call dest at %s+0x%lx",
2438 insn->sec->name, dest_off);
2439 return -1;
2440 }
2441 }
2442
2443 return 0;
2444 }
2445
2446 /*
2447 * Return true if name matches an instrumentation function, where calls to that
2448 * function from noinstr code can safely be removed, but compilers won't do so.
2449 */
static bool is_profiling_func(const char *name)
{
	/* KCOV instrumentation: many compilers cannot disable it per function. */
	if (strncmp(name, "__sanitizer_cov_", 16) == 0)
		return true;

	/*
	 * KCSAN/TSAN: some compilers keep __tsan_func_entry/exit and
	 * __tsan_atomic_signal_fence (barrier instrumentation) even with the
	 * __no_sanitize_thread attribute.  Once the kernel's minimum Clang
	 * version is 14.0, this can be removed.
	 */
	return strncmp(name, "__tsan_func_", 12) == 0 ||
	       strcmp(name, "__tsan_atomic_signal_fence") == 0;
}
2470
2471 static int classify_symbols(struct objtool_file *file)
2472 {
2473 struct symbol *func;
2474
2475 for_each_sym(file, func) {
2476 if (func->bind != STB_GLOBAL)
2477 continue;
2478
2479 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2480 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2481 func->static_call_tramp = true;
2482
2483 if (arch_is_retpoline(func))
2484 func->retpoline_thunk = true;
2485
2486 if (arch_is_rethunk(func))
2487 func->return_thunk = true;
2488
2489 if (arch_ftrace_match(func->name))
2490 func->fentry = true;
2491
2492 if (is_profiling_func(func->name))
2493 func->profiling_func = true;
2494 }
2495
2496 return 0;
2497 }
2498
2499 static void mark_rodata(struct objtool_file *file)
2500 {
2501 struct section *sec;
2502 bool found = false;
2503
2504 /*
2505 * Search for the following rodata sections, each of which can
2506 * potentially contain jump tables:
2507 *
2508 * - .rodata: can contain GCC switch tables
2509 * - .rodata.<func>: same, if -fdata-sections is being used
2510 * - .rodata..c_jump_table: contains C annotated jump tables
2511 *
2512 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2513 */
2514 for_each_sec(file, sec) {
2515 if (!strncmp(sec->name, ".rodata", 7) &&
2516 !strstr(sec->name, ".str1.")) {
2517 sec->rodata = true;
2518 found = true;
2519 }
2520 }
2521
2522 file->rodata = found;
2523 }
2524
2525 static int decode_sections(struct objtool_file *file)
2526 {
2527 int ret;
2528
2529 mark_rodata(file);
2530
2531 ret = init_pv_ops(file);
2532 if (ret)
2533 return ret;
2534
2535 /*
2536 * Must be before add_{jump_call}_destination.
2537 */
2538 ret = classify_symbols(file);
2539 if (ret)
2540 return ret;
2541
2542 ret = decode_instructions(file);
2543 if (ret)
2544 return ret;
2545
2546 add_ignores(file);
2547 add_uaccess_safe(file);
2548
2549 ret = add_ignore_alternatives(file);
2550 if (ret)
2551 return ret;
2552
2553 /*
2554 * Must be before read_unwind_hints() since that needs insn->noendbr.
2555 */
2556 ret = read_noendbr_hints(file);
2557 if (ret)
2558 return ret;
2559
2560 /*
2561 * Must be before add_jump_destinations(), which depends on 'func'
2562 * being set for alternatives, to enable proper sibling call detection.
2563 */
2564 if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
2565 ret = add_special_section_alts(file);
2566 if (ret)
2567 return ret;
2568 }
2569
2570 ret = add_jump_destinations(file);
2571 if (ret)
2572 return ret;
2573
2574 /*
2575 * Must be before add_call_destination(); it changes INSN_CALL to
2576 * INSN_JUMP.
2577 */
2578 ret = read_intra_function_calls(file);
2579 if (ret)
2580 return ret;
2581
2582 ret = add_call_destinations(file);
2583 if (ret)
2584 return ret;
2585
2586 /*
2587 * Must be after add_call_destinations() such that it can override
2588 * dead_end_function() marks.
2589 */
2590 ret = add_dead_ends(file);
2591 if (ret)
2592 return ret;
2593
2594 ret = add_jump_table_alts(file);
2595 if (ret)
2596 return ret;
2597
2598 ret = read_unwind_hints(file);
2599 if (ret)
2600 return ret;
2601
2602 ret = read_retpoline_hints(file);
2603 if (ret)
2604 return ret;
2605
2606 ret = read_instr_hints(file);
2607 if (ret)
2608 return ret;
2609
2610 ret = read_validate_unret_hints(file);
2611 if (ret)
2612 return ret;
2613
2614 return 0;
2615 }
2616
2617 static bool is_fentry_call(struct instruction *insn)
2618 {
2619 if (insn->type == INSN_CALL &&
2620 insn_call_dest(insn) &&
2621 insn_call_dest(insn)->fentry)
2622 return true;
2623
2624 return false;
2625 }
2626
2627 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2628 {
2629 struct cfi_state *cfi = &state->cfi;
2630 int i;
2631
2632 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2633 return true;
2634
2635 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2636 return true;
2637
2638 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2639 return true;
2640
2641 for (i = 0; i < CFI_NUM_REGS; i++) {
2642 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2643 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2644 return true;
2645 }
2646
2647 return false;
2648 }
2649
2650 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2651 int expected_offset)
2652 {
2653 return reg->base == CFI_CFA &&
2654 reg->offset == expected_offset;
2655 }
2656
2657 static bool has_valid_stack_frame(struct insn_state *state)
2658 {
2659 struct cfi_state *cfi = &state->cfi;
2660
2661 if (cfi->cfa.base == CFI_BP &&
2662 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2663 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2664 return true;
2665
2666 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2667 return true;
2668
2669 return false;
2670 }
2671
2672 static int update_cfi_state_regs(struct instruction *insn,
2673 struct cfi_state *cfi,
2674 struct stack_op *op)
2675 {
2676 struct cfi_reg *cfa = &cfi->cfa;
2677
2678 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2679 return 0;
2680
2681 /* push */
2682 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2683 cfa->offset += 8;
2684
2685 /* pop */
2686 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2687 cfa->offset -= 8;
2688
2689 /* add immediate to sp */
2690 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2691 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2692 cfa->offset -= op->src.offset;
2693
2694 return 0;
2695 }
2696
2697 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2698 {
2699 if (arch_callee_saved_reg(reg) &&
2700 cfi->regs[reg].base == CFI_UNDEFINED) {
2701 cfi->regs[reg].base = base;
2702 cfi->regs[reg].offset = offset;
2703 }
2704 }
2705
2706 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2707 {
2708 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2709 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2710 }
2711
2712 /*
2713 * A note about DRAP stack alignment:
2714 *
2715 * GCC has the concept of a DRAP register, which is used to help keep track of
2716 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2717 * register. The typical DRAP pattern is:
2718 *
2719 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2720 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2721 * 41 ff 72 f8 pushq -0x8(%r10)
2722 * 55 push %rbp
2723 * 48 89 e5 mov %rsp,%rbp
2724 * (more pushes)
2725 * 41 52 push %r10
2726 * ...
2727 * 41 5a pop %r10
2728 * (more pops)
2729 * 5d pop %rbp
2730 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2731 * c3 retq
2732 *
2733 * There are some variations in the epilogues, like:
2734 *
2735 * 5b pop %rbx
2736 * 41 5a pop %r10
2737 * 41 5c pop %r12
2738 * 41 5d pop %r13
2739 * 41 5e pop %r14
2740 * c9 leaveq
2741 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2742 * c3 retq
2743 *
2744 * and:
2745 *
2746 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2747 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2748 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2749 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2750 * c9 leaveq
2751 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2752 * c3 retq
2753 *
2754 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2755 * restored beforehand:
2756 *
2757 * 41 55 push %r13
2758 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2759 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2760 * ...
2761 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2762 * 41 5d pop %r13
2763 * c3 retq
2764 */
/*
 * Apply a single stack_op from @insn to the CFI state: tracks the CFA
 * base/offset, the stack size, callee-saved register save slots, and the
 * GCC DRAP stack-realignment pattern described in the comment above.
 * @next_insn is only consulted to tolerate stack-register modifications
 * immediately before an unwind hint.  Returns -1 on an unsupported or
 * inconsistent stack modification, 0 otherwise.
 */
static int update_cfi_state(struct instruction *insn,
			    struct instruction *next_insn,
			    struct cfi_state *cfi, struct stack_op *op)
{
	struct cfi_reg *cfa = &cfi->cfa;
	struct cfi_reg *regs = cfi->regs;

	/* ignore UNWIND_HINT_UNDEFINED regions */
	if (cfi->force_undefined)
		return 0;

	/* stack operations don't make sense with an undefined CFA */
	if (cfa->base == CFI_UNDEFINED) {
		if (insn_func(insn)) {
			WARN_INSN(insn, "undefined stack state");
			return -1;
		}
		return 0;
	}

	/* REGS-hinted regions get the simplified SP-only tracking. */
	if (cfi->type == UNWIND_HINT_TYPE_REGS ||
	    cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
		return update_cfi_state_regs(insn, cfi, op);

	switch (op->dest.type) {

	case OP_DEST_REG:
		switch (op->src.type) {

		case OP_SRC_REG:
			if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
			    cfa->base == CFI_SP &&
			    check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {

				/* mov %rsp, %rbp */
				cfa->base = op->dest.reg;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP &&
				 op->dest.reg == CFI_BP && cfi->drap) {

				/* drap: mov %rsp, %rbp */
				regs[CFI_BP].base = CFI_BP;
				regs[CFI_BP].offset = -cfi->stack_size;
				cfi->bp_scratch = false;
			}

			else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/*
				 * mov %rsp, %reg
				 *
				 * This is needed for the rare case where GCC
				 * does:
				 *
				 *   mov    %rsp, %rax
				 *   ...
				 *   mov    %rax, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = -cfi->stack_size;
			}

			else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
				 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {

				/*
				 * mov %rbp, %rsp
				 *
				 * Restore the original stack pointer (Clang).
				 */
				cfi->stack_size = -cfi->regs[CFI_BP].offset;
			}

			else if (op->dest.reg == cfa->base) {

				/* mov %reg, %rsp */
				if (cfa->base == CFI_SP &&
				    cfi->vals[op->src.reg].base == CFI_CFA) {

					/*
					 * This is needed for the rare case
					 * where GCC does something dumb like:
					 *
					 *   lea    0x8(%rsp), %rcx
					 *   ...
					 *   mov    %rcx, %rsp
					 */
					cfa->offset = -cfi->vals[op->src.reg].offset;
					cfi->stack_size = cfa->offset;

				} else if (cfa->base == CFI_SP &&
					   cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
					   cfi->vals[op->src.reg].offset == cfa->offset) {

					/*
					 * Stack swizzle:
					 *
					 * 1: mov %rsp, (%[tos])
					 * 2: mov %[tos], %rsp
					 *    ...
					 * 3: pop %rsp
					 *
					 * Where:
					 *
					 * 1 - places a pointer to the previous
					 *     stack at the Top-of-Stack of the
					 *     new stack.
					 *
					 * 2 - switches to the new stack.
					 *
					 * 3 - pops the Top-of-Stack to restore
					 *     the original stack.
					 *
					 * Note: we set base to SP_INDIRECT
					 * here and preserve offset. Therefore
					 * when the unwinder reaches ToS it
					 * will dereference SP and then add the
					 * offset to find the next frame, IOW:
					 * (%rsp) + offset.
					 */
					cfa->base = CFI_SP_INDIRECT;

				} else {
					/* Unrecognized SP clobber: give up on the CFA. */
					cfa->base = CFI_UNDEFINED;
					cfa->offset = 0;
				}
			}

			else if (op->dest.reg == CFI_SP &&
				 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
				 cfi->vals[op->src.reg].offset == cfa->offset) {

				/*
				 * The same stack swizzle case 2) as above. But
				 * because we can't change cfa->base, case 3)
				 * will become a regular POP. Pretend we're a
				 * PUSH so things don't go unbalanced.
				 */
				cfi->stack_size += 8;
			}


			break;

		case OP_SRC_ADD:
			if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {

				/* add imm, %rsp */
				cfi->stack_size -= op->src.offset;
				if (cfa->base == CFI_SP)
					cfa->offset -= op->src.offset;
				break;
			}

			if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {

				/* lea disp(%rbp), %rsp */
				cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
				break;
			}

			if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {

				/* drap: lea disp(%rsp), %drap */
				cfi->drap_reg = op->dest.reg;

				/*
				 * lea disp(%rsp), %reg
				 *
				 * This is needed for the rare case where GCC
				 * does something dumb like:
				 *
				 *   lea    0x8(%rsp), %rcx
				 *   ...
				 *   mov    %rcx, %rsp
				 */
				cfi->vals[op->dest.reg].base = CFI_CFA;
				cfi->vals[op->dest.reg].offset = \
					-cfi->stack_size + op->src.offset;

				break;
			}

			if (cfi->drap && op->dest.reg == CFI_SP &&
			    op->src.reg == cfi->drap_reg) {

				/* drap: lea disp(%drap), %rsp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size = -op->src.offset;
				cfi->drap_reg = CFI_UNDEFINED;
				cfi->drap = false;
				break;
			}

			/* Tolerated only right before an unwind hint. */
			if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
				WARN_INSN(insn, "unsupported stack register modification");
				return -1;
			}

			break;

		case OP_SRC_AND:
			if (op->dest.reg != CFI_SP ||
			    (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
			    (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
				WARN_INSN(insn, "unsupported stack pointer realignment");
				return -1;
			}

			if (cfi->drap_reg != CFI_UNDEFINED) {
				/* drap: and imm, %rsp */
				cfa->base = cfi->drap_reg;
				cfa->offset = cfi->stack_size = 0;
				cfi->drap = true;
			}

			/*
			 * Older versions of GCC (4.8ish) realign the stack
			 * without DRAP, with a frame pointer.
			 */

			break;

		case OP_SRC_POP:
		case OP_SRC_POPF:
			if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {

				/* pop %rsp; # restore from a stack swizzle */
				cfa->base = CFI_SP;
				break;
			}

			if (!cfi->drap && op->dest.reg == cfa->base) {

				/* pop %rbp */
				cfa->base = CFI_SP;
			}

			if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
			    op->dest.reg == cfi->drap_reg &&
			    cfi->drap_offset == -cfi->stack_size) {

				/* drap: pop %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;

			} else if (cfi->stack_size == -regs[op->dest.reg].offset) {

				/* pop %reg */
				restore_reg(cfi, op->dest.reg);
			}

			cfi->stack_size -= 8;
			if (cfa->base == CFI_SP)
				cfa->offset -= 8;

			break;

		case OP_SRC_REG_INDIRECT:
			if (!cfi->drap && op->dest.reg == cfa->base &&
			    op->dest.reg == CFI_BP) {

				/* mov disp(%rsp), %rbp */
				cfa->base = CFI_SP;
				cfa->offset = cfi->stack_size;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == cfi->drap_offset) {

				/* drap: mov disp(%rbp), %drap */
				cfa->base = cfi->drap_reg;
				cfa->offset = 0;
				cfi->drap_offset = -1;
			}

			if (cfi->drap && op->src.reg == CFI_BP &&
			    op->src.offset == regs[op->dest.reg].offset) {

				/* drap: mov disp(%rbp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == cfa->base &&
				   op->src.offset == regs[op->dest.reg].offset + cfa->offset) {

				/* mov disp(%rbp), %reg */
				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);

			} else if (op->src.reg == CFI_SP &&
				   op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {

				/* mov disp(%rsp), %reg */
				restore_reg(cfi, op->dest.reg);
			}

			break;

		default:
			WARN_INSN(insn, "unknown stack-related instruction");
			return -1;
		}

		break;

	case OP_DEST_PUSH:
	case OP_DEST_PUSHF:
		cfi->stack_size += 8;
		if (cfa->base == CFI_SP)
			cfa->offset += 8;

		if (op->src.type != OP_SRC_REG)
			break;

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: push %drap */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = -cfi->stack_size;

				/* save drap so we know when to restore it */
				cfi->drap_offset = -cfi->stack_size;

			} else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {

				/* drap: push %rbp */
				cfi->stack_size = 0;

			} else {

				/* drap: push %reg */
				save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
			}

		} else {

			/* push %reg */
			save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
		}

		/* detect when asm code uses rbp as a scratch register */
		if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
		    cfa->base != CFI_BP)
			cfi->bp_scratch = true;
		break;

	case OP_DEST_REG_INDIRECT:

		if (cfi->drap) {
			if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {

				/* drap: mov %drap, disp(%rbp) */
				cfa->base = CFI_BP_INDIRECT;
				cfa->offset = op->dest.offset;

				/* save drap offset so we know when to restore it */
				cfi->drap_offset = op->dest.offset;
			} else {

				/* drap: mov reg, disp(%rbp) */
				save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
			}

		} else if (op->dest.reg == cfa->base) {

			/* mov reg, disp(%rbp) */
			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->cfa.offset);

		} else if (op->dest.reg == CFI_SP) {

			/* mov reg, disp(%rsp) */
			save_reg(cfi, op->src.reg, CFI_CFA,
				 op->dest.offset - cfi->stack_size);

		} else if (op->src.reg == CFI_SP && op->dest.offset == 0) {

			/* mov %rsp, (%reg); # setup a stack swizzle. */
			cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
			cfi->vals[op->dest.reg].offset = cfa->offset;
		}

		break;

	case OP_DEST_MEM:
		if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
			WARN_INSN(insn, "unknown stack-related memory operation");
			return -1;
		}

		/* pop mem */
		cfi->stack_size -= 8;
		if (cfa->base == CFI_SP)
			cfa->offset -= 8;

		break;

	default:
		WARN_INSN(insn, "unknown stack-related instruction");
		return -1;
	}

	return 0;
}
3174
3175 /*
3176 * The stack layouts of alternatives instructions can sometimes diverge when
3177 * they have stack modifications. That's fine as long as the potential stack
3178 * layouts don't conflict at any given potential instruction boundary.
3179 *
3180 * Flatten the CFIs of the different alternative code streams (both original
3181 * and replacement) into a single shared CFI array which can be used to detect
3182 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3183 */
3184 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3185 {
3186 struct cfi_state **alt_cfi;
3187 int group_off;
3188
3189 if (!insn->alt_group)
3190 return 0;
3191
3192 if (!insn->cfi) {
3193 WARN("CFI missing");
3194 return -1;
3195 }
3196
3197 alt_cfi = insn->alt_group->cfi;
3198 group_off = insn->offset - insn->alt_group->first_insn->offset;
3199
3200 if (!alt_cfi[group_off]) {
3201 alt_cfi[group_off] = insn->cfi;
3202 } else {
3203 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3204 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3205 struct instruction *orig = orig_group->first_insn;
3206 char *where = offstr(insn->sec, insn->offset);
3207 WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
3208 free(where);
3209 return -1;
3210 }
3211 }
3212
3213 return 0;
3214 }
3215
/*
 * Apply all of @insn's stack_ops to the CFI state and, within alternatives,
 * maintain a bit-stack of uaccess states saved/restored by PUSHF/POPF so
 * STAC/CLAC pairing can be validated across flag save/restore.
 * Returns 1 on a CFI error or PUSHF stack overflow, 0 otherwise.
 */
static int handle_insn_ops(struct instruction *insn,
			   struct instruction *next_insn,
			   struct insn_state *state)
{
	struct stack_op *op;

	for (op = insn->stack_ops; op; op = op->next) {

		if (update_cfi_state(insn, next_insn, &state->cfi, op))
			return 1;

		/* The PUSHF/POPF shadow stack is only tracked inside alternatives. */
		if (!insn->alt_group)
			continue;

		if (op->dest.type == OP_DEST_PUSHF) {
			if (!state->uaccess_stack) {
				/* Seed the sentinel bit that marks the stack bottom. */
				state->uaccess_stack = 1;
			} else if (state->uaccess_stack >> 31) {
				/* Top bit in use: one more shift would overflow. */
				WARN_INSN(insn, "PUSHF stack exhausted");
				return 1;
			}
			/* Shift in the current uaccess state as the new low bit. */
			state->uaccess_stack <<= 1;
			state->uaccess_stack |= state->uaccess;
		}

		if (op->src.type == OP_SRC_POPF) {
			if (state->uaccess_stack) {
				/* Pop the saved uaccess state off the shadow stack. */
				state->uaccess = state->uaccess_stack & 1;
				state->uaccess_stack >>= 1;
				/* Only the sentinel left: the stack is empty again. */
				if (state->uaccess_stack == 1)
					state->uaccess_stack = 0;
			}
		}
	}

	return 0;
}
3253
3254 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3255 {
3256 struct cfi_state *cfi1 = insn->cfi;
3257 int i;
3258
3259 if (!cfi1) {
3260 WARN("CFI missing");
3261 return false;
3262 }
3263
3264 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3265
3266 WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3267 cfi1->cfa.base, cfi1->cfa.offset,
3268 cfi2->cfa.base, cfi2->cfa.offset);
3269
3270 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3271 for (i = 0; i < CFI_NUM_REGS; i++) {
3272 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
3273 sizeof(struct cfi_reg)))
3274 continue;
3275
3276 WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3277 i, cfi1->regs[i].base, cfi1->regs[i].offset,
3278 i, cfi2->regs[i].base, cfi2->regs[i].offset);
3279 break;
3280 }
3281
3282 } else if (cfi1->type != cfi2->type) {
3283
3284 WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
3285 cfi1->type, cfi2->type);
3286
3287 } else if (cfi1->drap != cfi2->drap ||
3288 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3289 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3290
3291 WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3292 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3293 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3294
3295 } else
3296 return true;
3297
3298 return false;
3299 }
3300
3301 static inline bool func_uaccess_safe(struct symbol *func)
3302 {
3303 if (func)
3304 return func->uaccess_safe;
3305
3306 return false;
3307 }
3308
3309 static inline const char *call_dest_name(struct instruction *insn)
3310 {
3311 static char pvname[19];
3312 struct reloc *reloc;
3313 int idx;
3314
3315 if (insn_call_dest(insn))
3316 return insn_call_dest(insn)->name;
3317
3318 reloc = insn_reloc(NULL, insn);
3319 if (reloc && !strcmp(reloc->sym->name, "pv_ops")) {
3320 idx = (reloc->addend / sizeof(void *));
3321 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3322 return pvname;
3323 }
3324
3325 return "{dynamic}";
3326 }
3327
3328 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3329 {
3330 struct symbol *target;
3331 struct reloc *reloc;
3332 int idx;
3333
3334 reloc = insn_reloc(file, insn);
3335 if (!reloc || strcmp(reloc->sym->name, "pv_ops"))
3336 return false;
3337
3338 idx = (arch_dest_reloc_offset(reloc->addend) / sizeof(void *));
3339
3340 if (file->pv_ops[idx].clean)
3341 return true;
3342
3343 file->pv_ops[idx].clean = true;
3344
3345 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3346 if (!target->sec->noinstr) {
3347 WARN("pv_ops[%d]: %s", idx, target->name);
3348 file->pv_ops[idx].clean = false;
3349 }
3350 }
3351
3352 return file->pv_ops[idx].clean;
3353 }
3354
3355 static inline bool noinstr_call_dest(struct objtool_file *file,
3356 struct instruction *insn,
3357 struct symbol *func)
3358 {
3359 /*
3360 * We can't deal with indirect function calls at present;
3361 * assume they're instrumented.
3362 */
3363 if (!func) {
3364 if (file->pv_ops)
3365 return pv_call_dest(file, insn);
3366
3367 return false;
3368 }
3369
3370 /*
3371 * If the symbol is from a noinstr section; we good.
3372 */
3373 if (func->sec->noinstr)
3374 return true;
3375
3376 /*
3377 * If the symbol is a static_call trampoline, we can't tell.
3378 */
3379 if (func->static_call_tramp)
3380 return true;
3381
3382 /*
3383 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3384 * something 'BAD' happened. At the risk of taking the machine down,
3385 * let them proceed to get the message out.
3386 */
3387 if (!strncmp(func->name, "__ubsan_handle_", 15))
3388 return true;
3389
3390 return false;
3391 }
3392
/*
 * Validate a call against the current context: it must not escape
 * .noinstr.text while instrumentation is disallowed, must not be made with
 * UACCESS enabled unless the destination is uaccess-safe, and must not be
 * made with DF set.  Returns 1 (after warning) on violation, 0 otherwise.
 */
static int validate_call(struct objtool_file *file,
			 struct instruction *insn,
			 struct insn_state *state)
{
	/* noinstr code may only call noinstr-safe destinations. */
	if (state->noinstr && state->instr <= 0 &&
	    !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
		return 1;
	}

	/* With uaccess open, only uaccess-safe functions may be called. */
	if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
		WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
		return 1;
	}

	return 0;
}
3415
/*
 * A sibling (tail) call must leave the stack frame exactly as it was at
 * function entry; beyond that it is validated like any other call.
 */
static int validate_sibling_call(struct objtool_file *file,
				 struct instruction *insn,
				 struct insn_state *state)
{
	if (insn_func(insn)) {
		if (has_modified_stack_frame(insn, state)) {
			WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
			return 1;
		}
	}

	return validate_call(file, insn, state);
}
3427
/*
 * Validate a return instruction: instrumentation and uaccess/DF state must
 * be consistent with the containing function's contract, and the stack
 * frame must be fully restored.  Returns 1 (after warning) on violation.
 */
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
	if (state->noinstr && state->instr > 0) {
		WARN_INSN(insn, "return with instrumentation enabled");
		return 1;
	}

	/* Returning with uaccess open is only OK from a uaccess-safe function. */
	if (state->uaccess && !func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS enabled");
		return 1;
	}

	if (!state->uaccess && func_uaccess_safe(func)) {
		WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
		return 1;
	}

	if (state->df) {
		WARN_INSN(insn, "return with DF set");
		return 1;
	}

	/* The frame must be back to its function-entry state. */
	if (func && has_modified_stack_frame(insn, state)) {
		WARN_INSN(insn, "return with modified stack frame");
		return 1;
	}

	if (state->cfi.bp_scratch) {
		WARN_INSN(insn, "BP used as a scratch register");
		return 1;
	}

	return 0;
}
3462
/*
 * Return the next instruction validate_branch() should visit after @insn,
 * accounting for alternative groups (see diagram below).  Falls back to the
 * next instruction in the same section.
 */
static struct instruction *next_insn_to_validate(struct objtool_file *file,
						 struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	/*
	 * Simulate the fact that alternatives are patched in-place.  When the
	 * end of a replacement alt_group is reached, redirect objtool flow to
	 * the end of the original alt_group.
	 *
	 * insn->alts->insn -> alt_group->first_insn
	 *		       ...
	 *		       alt_group->last_insn
	 *		       [alt_group->nop] -> next(orig_group->last_insn)
	 */
	if (alt_group) {
		if (alt_group->nop) {
			/* ->nop implies ->orig_group */
			if (insn == alt_group->last_insn)
				return alt_group->nop;
			if (insn == alt_group->nop)
				goto next_orig;
		}
		if (insn == alt_group->last_insn && alt_group->orig_group)
			goto next_orig;
	}

	return next_insn_same_sec(file, insn);

next_orig:
	/* Resume after the original group that this alternative replaces. */
	return next_insn_same_sec(file, alt_group->orig_group->last_insn);
}
3495
3496 /*
3497 * Follow the branch starting at the given instruction, and recursively follow
3498 * any other branches (jumps). Meanwhile, track the frame pointer state at
3499 * each instruction and validate all the rules described in
3500 * tools/objtool/Documentation/objtool.txt.
3501 */
3502 static int validate_branch(struct objtool_file *file, struct symbol *func,
3503 struct instruction *insn, struct insn_state state)
3504 {
3505 struct alternative *alt;
3506 struct instruction *next_insn, *prev_insn = NULL;
3507 struct section *sec;
3508 u8 visited;
3509 int ret;
3510
3511 sec = insn->sec;
3512
3513 while (1) {
3514 next_insn = next_insn_to_validate(file, insn);
3515
3516 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3517 /* Ignore KCFI type preambles, which always fall through */
3518 if (!strncmp(func->name, "__cfi_", 6) ||
3519 !strncmp(func->name, "__pfx_", 6))
3520 return 0;
3521
3522 WARN("%s() falls through to next function %s()",
3523 func->name, insn_func(insn)->name);
3524 return 1;
3525 }
3526
3527 if (func && insn->ignore) {
3528 WARN_INSN(insn, "BUG: why am I validating an ignored function?");
3529 return 1;
3530 }
3531
3532 visited = VISITED_BRANCH << state.uaccess;
3533 if (insn->visited & VISITED_BRANCH_MASK) {
3534 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3535 return 1;
3536
3537 if (insn->visited & visited)
3538 return 0;
3539 } else {
3540 nr_insns_visited++;
3541 }
3542
3543 if (state.noinstr)
3544 state.instr += insn->instr;
3545
3546 if (insn->hint) {
3547 if (insn->restore) {
3548 struct instruction *save_insn, *i;
3549
3550 i = insn;
3551 save_insn = NULL;
3552
3553 sym_for_each_insn_continue_reverse(file, func, i) {
3554 if (i->save) {
3555 save_insn = i;
3556 break;
3557 }
3558 }
3559
3560 if (!save_insn) {
3561 WARN_INSN(insn, "no corresponding CFI save for CFI restore");
3562 return 1;
3563 }
3564
3565 if (!save_insn->visited) {
3566 WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
3567 return 1;
3568 }
3569
3570 insn->cfi = save_insn->cfi;
3571 nr_cfi_reused++;
3572 }
3573
3574 state.cfi = *insn->cfi;
3575 } else {
3576 /* XXX track if we actually changed state.cfi */
3577
3578 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3579 insn->cfi = prev_insn->cfi;
3580 nr_cfi_reused++;
3581 } else {
3582 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3583 }
3584 }
3585
3586 insn->visited |= visited;
3587
3588 if (propagate_alt_cfi(file, insn))
3589 return 1;
3590
3591 if (!insn->ignore_alts && insn->alts) {
3592 bool skip_orig = false;
3593
3594 for (alt = insn->alts; alt; alt = alt->next) {
3595 if (alt->skip_orig)
3596 skip_orig = true;
3597
3598 ret = validate_branch(file, func, alt->insn, state);
3599 if (ret) {
3600 BT_INSN(insn, "(alt)");
3601 return ret;
3602 }
3603 }
3604
3605 if (skip_orig)
3606 return 0;
3607 }
3608
3609 if (handle_insn_ops(insn, next_insn, &state))
3610 return 1;
3611
3612 switch (insn->type) {
3613
3614 case INSN_RETURN:
3615 return validate_return(func, insn, &state);
3616
3617 case INSN_CALL:
3618 case INSN_CALL_DYNAMIC:
3619 ret = validate_call(file, insn, &state);
3620 if (ret)
3621 return ret;
3622
3623 if (opts.stackval && func && !is_fentry_call(insn) &&
3624 !has_valid_stack_frame(&state)) {
3625 WARN_INSN(insn, "call without frame pointer save/setup");
3626 return 1;
3627 }
3628
3629 if (insn->dead_end)
3630 return 0;
3631
3632 break;
3633
3634 case INSN_JUMP_CONDITIONAL:
3635 case INSN_JUMP_UNCONDITIONAL:
3636 if (is_sibling_call(insn)) {
3637 ret = validate_sibling_call(file, insn, &state);
3638 if (ret)
3639 return ret;
3640
3641 } else if (insn->jump_dest) {
3642 ret = validate_branch(file, func,
3643 insn->jump_dest, state);
3644 if (ret) {
3645 BT_INSN(insn, "(branch)");
3646 return ret;
3647 }
3648 }
3649
3650 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3651 return 0;
3652
3653 break;
3654
3655 case INSN_JUMP_DYNAMIC:
3656 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3657 if (is_sibling_call(insn)) {
3658 ret = validate_sibling_call(file, insn, &state);
3659 if (ret)
3660 return ret;
3661 }
3662
3663 if (insn->type == INSN_JUMP_DYNAMIC)
3664 return 0;
3665
3666 break;
3667
3668 case INSN_CONTEXT_SWITCH:
3669 if (func && (!next_insn || !next_insn->hint)) {
3670 WARN_INSN(insn, "unsupported instruction in callable function");
3671 return 1;
3672 }
3673 return 0;
3674
3675 case INSN_STAC:
3676 if (state.uaccess) {
3677 WARN_INSN(insn, "recursive UACCESS enable");
3678 return 1;
3679 }
3680
3681 state.uaccess = true;
3682 break;
3683
3684 case INSN_CLAC:
3685 if (!state.uaccess && func) {
3686 WARN_INSN(insn, "redundant UACCESS disable");
3687 return 1;
3688 }
3689
3690 if (func_uaccess_safe(func) && !state.uaccess_stack) {
3691 WARN_INSN(insn, "UACCESS-safe disables UACCESS");
3692 return 1;
3693 }
3694
3695 state.uaccess = false;
3696 break;
3697
3698 case INSN_STD:
3699 if (state.df) {
3700 WARN_INSN(insn, "recursive STD");
3701 return 1;
3702 }
3703
3704 state.df = true;
3705 break;
3706
3707 case INSN_CLD:
3708 if (!state.df && func) {
3709 WARN_INSN(insn, "redundant CLD");
3710 return 1;
3711 }
3712
3713 state.df = false;
3714 break;
3715
3716 default:
3717 break;
3718 }
3719
3720 if (insn->dead_end)
3721 return 0;
3722
3723 if (!next_insn) {
3724 if (state.cfi.cfa.base == CFI_UNDEFINED)
3725 return 0;
3726 WARN("%s: unexpected end of section", sec->name);
3727 return 1;
3728 }
3729
3730 prev_insn = insn;
3731 insn = next_insn;
3732 }
3733
3734 return 0;
3735 }
3736
3737 static int validate_unwind_hint(struct objtool_file *file,
3738 struct instruction *insn,
3739 struct insn_state *state)
3740 {
3741 if (insn->hint && !insn->visited && !insn->ignore) {
3742 int ret = validate_branch(file, insn_func(insn), insn, *state);
3743 if (ret)
3744 BT_INSN(insn, "<=== (hint)");
3745 return ret;
3746 }
3747
3748 return 0;
3749 }
3750
3751 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3752 {
3753 struct instruction *insn;
3754 struct insn_state state;
3755 int warnings = 0;
3756
3757 if (!file->hints)
3758 return 0;
3759
3760 init_insn_state(file, &state, sec);
3761
3762 if (sec) {
3763 sec_for_each_insn(file, sec, insn)
3764 warnings += validate_unwind_hint(file, insn, &state);
3765 } else {
3766 for_each_insn(file, insn)
3767 warnings += validate_unwind_hint(file, insn, &state);
3768 }
3769
3770 return warnings;
3771 }
3772
/*
 * Validate rethunk entry constraint: must untrain RET before the first RET.
 *
 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
 * before an actual RET instruction.
 */
static int validate_unret(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *next, *dest;
	int ret;

	for (;;) {
		next = next_insn_to_validate(file, insn);

		/* Loop/join point already checked on another path: done. */
		if (insn->visited & VISITED_UNRET)
			return 0;

		insn->visited |= VISITED_UNRET;

		/* Every alternative must also satisfy the UNRET constraint. */
		if (!insn->ignore_alts && insn->alts) {
			struct alternative *alt;
			bool skip_orig = false;

			for (alt = insn->alts; alt; alt = alt->next) {
				if (alt->skip_orig)
					skip_orig = true;

				ret = validate_unret(file, alt->insn);
				if (ret) {
					BT_INSN(insn, "(alt)");
					return ret;
				}
			}

			if (skip_orig)
				return 0;
		}

		switch (insn->type) {

		case INSN_CALL_DYNAMIC:
		case INSN_JUMP_DYNAMIC:
		case INSN_JUMP_DYNAMIC_CONDITIONAL:
			/* Indirect targets can't be followed statically. */
			WARN_INSN(insn, "early indirect call");
			return 1;

		case INSN_JUMP_UNCONDITIONAL:
		case INSN_JUMP_CONDITIONAL:
			if (!is_sibling_call(insn)) {
				if (!insn->jump_dest) {
					WARN_INSN(insn, "unresolved jump target after linking?!?");
					return -1;
				}
				/* Follow the taken side of the branch. */
				ret = validate_unret(file, insn->jump_dest);
				if (ret) {
					BT_INSN(insn, "(branch%s)",
						insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
					return ret;
				}

				if (insn->type == INSN_JUMP_UNCONDITIONAL)
					return 0;

				/* Conditional: also fall through to 'next'. */
				break;
			}

			/* Sibling calls are treated like calls below. */
			/* fallthrough */
		case INSN_CALL:
			dest = find_insn(file, insn_call_dest(insn)->sec,
					 insn_call_dest(insn)->offset);
			if (!dest) {
				WARN("Unresolved function after linking!?: %s",
				     insn_call_dest(insn)->name);
				return -1;
			}

			ret = validate_unret(file, dest);
			if (ret) {
				BT_INSN(insn, "(call)");
				return ret;
			}
			/*
			 * If a call returns without error, it must have seen UNTRAIN_RET.
			 * Therefore any non-error return is a success.
			 */
			return 0;

		case INSN_RETURN:
			WARN_INSN(insn, "RET before UNTRAIN");
			return 1;

		case INSN_NOP:
			/* A retpoline-safe-annotated NOP satisfies the constraint. */
			if (insn->retpoline_safe)
				return 0;
			break;

		default:
			break;
		}

		/* Running off the end without resolution is a hard error. */
		if (!next) {
			WARN_INSN(insn, "teh end!");
			return -1;
		}
		insn = next;
	}

	return 0;
}
3882
3883 /*
3884 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
3885 * VALIDATE_UNRET_END before RET.
3886 */
3887 static int validate_unrets(struct objtool_file *file)
3888 {
3889 struct instruction *insn;
3890 int ret, warnings = 0;
3891
3892 for_each_insn(file, insn) {
3893 if (!insn->unret)
3894 continue;
3895
3896 ret = validate_unret(file, insn);
3897 if (ret < 0) {
3898 WARN_INSN(insn, "Failed UNRET validation");
3899 return ret;
3900 }
3901 warnings += ret;
3902 }
3903
3904 return warnings;
3905 }
3906
3907 static int validate_retpoline(struct objtool_file *file)
3908 {
3909 struct instruction *insn;
3910 int warnings = 0;
3911
3912 for_each_insn(file, insn) {
3913 if (insn->type != INSN_JUMP_DYNAMIC &&
3914 insn->type != INSN_CALL_DYNAMIC &&
3915 insn->type != INSN_RETURN)
3916 continue;
3917
3918 if (insn->retpoline_safe)
3919 continue;
3920
3921 if (insn->sec->init)
3922 continue;
3923
3924 if (insn->type == INSN_RETURN) {
3925 if (opts.rethunk) {
3926 WARN_INSN(insn, "'naked' return found in RETHUNK build");
3927 } else
3928 continue;
3929 } else {
3930 WARN_INSN(insn, "indirect %s found in RETPOLINE build",
3931 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3932 }
3933
3934 warnings++;
3935 }
3936
3937 return warnings;
3938 }
3939
3940 static bool is_kasan_insn(struct instruction *insn)
3941 {
3942 return (insn->type == INSN_CALL &&
3943 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
3944 }
3945
3946 static bool is_ubsan_insn(struct instruction *insn)
3947 {
3948 return (insn->type == INSN_CALL &&
3949 !strcmp(insn_call_dest(insn)->name,
3950 "__ubsan_handle_builtin_unreachable"));
3951 }
3952
3953 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
3954 {
3955 int i;
3956 struct instruction *prev_insn;
3957
3958 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
3959 return true;
3960
3961 /*
3962 * Ignore alternative replacement instructions. This can happen
3963 * when a whitelisted function uses one of the ALTERNATIVE macros.
3964 */
3965 if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
3966 !strcmp(insn->sec->name, ".altinstr_aux"))
3967 return true;
3968
3969 /*
3970 * Whole archive runs might encounter dead code from weak symbols.
3971 * This is where the linker will have dropped the weak symbol in
3972 * favour of a regular symbol, but leaves the code in place.
3973 *
3974 * In this case we'll find a piece of code (whole function) that is not
3975 * covered by a !section symbol. Ignore them.
3976 */
3977 if (opts.link && !insn_func(insn)) {
3978 int size = find_symbol_hole_containing(insn->sec, insn->offset);
3979 unsigned long end = insn->offset + size;
3980
3981 if (!size) /* not a hole */
3982 return false;
3983
3984 if (size < 0) /* hole until the end */
3985 return true;
3986
3987 sec_for_each_insn_continue(file, insn) {
3988 /*
3989 * If we reach a visited instruction at or before the
3990 * end of the hole, ignore the unreachable.
3991 */
3992 if (insn->visited)
3993 return true;
3994
3995 if (insn->offset >= end)
3996 break;
3997
3998 /*
3999 * If this hole jumps to a .cold function, mark it ignore too.
4000 */
4001 if (insn->jump_dest && insn_func(insn->jump_dest) &&
4002 strstr(insn_func(insn->jump_dest)->name, ".cold")) {
4003 struct instruction *dest = insn->jump_dest;
4004 func_for_each_insn(file, insn_func(dest), dest)
4005 dest->ignore = true;
4006 }
4007 }
4008
4009 return false;
4010 }
4011
4012 if (!insn_func(insn))
4013 return false;
4014
4015 if (insn_func(insn)->static_call_tramp)
4016 return true;
4017
4018 /*
4019 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4020 * __builtin_unreachable(). The BUG() macro has an unreachable() after
4021 * the UD2, which causes GCC's undefined trap logic to emit another UD2
4022 * (or occasionally a JMP to UD2).
4023 *
4024 * It may also insert a UD2 after calling a __noreturn function.
4025 */
4026 prev_insn = prev_insn_same_sec(file, insn);
4027 if (prev_insn->dead_end &&
4028 (insn->type == INSN_BUG ||
4029 (insn->type == INSN_JUMP_UNCONDITIONAL &&
4030 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
4031 return true;
4032
4033 /*
4034 * Check if this (or a subsequent) instruction is related to
4035 * CONFIG_UBSAN or CONFIG_KASAN.
4036 *
4037 * End the search at 5 instructions to avoid going into the weeds.
4038 */
4039 for (i = 0; i < 5; i++) {
4040
4041 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
4042 return true;
4043
4044 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
4045 if (insn->jump_dest &&
4046 insn_func(insn->jump_dest) == insn_func(insn)) {
4047 insn = insn->jump_dest;
4048 continue;
4049 }
4050
4051 break;
4052 }
4053
4054 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
4055 break;
4056
4057 insn = next_insn_same_sec(file, insn);
4058 }
4059
4060 return false;
4061 }
4062
/*
 * Create a prefix symbol for @func covering the opts.prefix bytes of NOP
 * padding immediately preceding it, and propagate the function entry's
 * CFI state onto that padding.
 *
 * Returns 0 on success, -1 if the expected run of NOPs isn't present.
 */
static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
{
	struct instruction *insn, *prev;
	struct cfi_state *cfi;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn)
		return -1;

	/* Walk backwards through the padding preceding the function entry. */
	for (prev = prev_insn_same_sec(file, insn);
	     prev;
	     prev = prev_insn_same_sec(file, prev)) {
		u64 offset;

		/* Anything other than a NOP means there's no prefix here. */
		if (prev->type != INSN_NOP)
			return -1;

		offset = func->offset - prev->offset;

		/* Walked back further than the prefix size: give up. */
		if (offset > opts.prefix)
			return -1;

		/* Not far enough back yet; keep walking. */
		if (offset < opts.prefix)
			continue;

		/* Exactly opts.prefix bytes of NOPs: emit the symbol. */
		elf_create_prefix_symbol(file->elf, func, opts.prefix);
		break;
	}

	/* Hit the start of the section before finding the prefix. */
	if (!prev)
		return -1;

	if (!insn->cfi) {
		/*
		 * This can happen if stack validation isn't enabled or the
		 * function is annotated with STACK_FRAME_NON_STANDARD.
		 */
		return 0;
	}

	/* Propagate insn->cfi to the prefix code */
	cfi = cfi_hash_find_or_add(insn->cfi);
	for (; prev != insn; prev = next_insn_same_sec(file, prev))
		prev->cfi = cfi;

	return 0;
}
4110
4111 static int add_prefix_symbols(struct objtool_file *file)
4112 {
4113 struct section *sec;
4114 struct symbol *func;
4115
4116 for_each_sec(file, sec) {
4117 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4118 continue;
4119
4120 sec_for_each_sym(sec, func) {
4121 if (func->type != STT_FUNC)
4122 continue;
4123
4124 add_prefix_symbol(file, func);
4125 }
4126 }
4127
4128 return 0;
4129 }
4130
4131 static int validate_symbol(struct objtool_file *file, struct section *sec,
4132 struct symbol *sym, struct insn_state *state)
4133 {
4134 struct instruction *insn;
4135 int ret;
4136
4137 if (!sym->len) {
4138 WARN("%s() is missing an ELF size annotation", sym->name);
4139 return 1;
4140 }
4141
4142 if (sym->pfunc != sym || sym->alias != sym)
4143 return 0;
4144
4145 insn = find_insn(file, sec, sym->offset);
4146 if (!insn || insn->ignore || insn->visited)
4147 return 0;
4148
4149 state->uaccess = sym->uaccess_safe;
4150
4151 ret = validate_branch(file, insn_func(insn), insn, *state);
4152 if (ret)
4153 BT_INSN(insn, "<=== (sym)");
4154 return ret;
4155 }
4156
4157 static int validate_section(struct objtool_file *file, struct section *sec)
4158 {
4159 struct insn_state state;
4160 struct symbol *func;
4161 int warnings = 0;
4162
4163 sec_for_each_sym(sec, func) {
4164 if (func->type != STT_FUNC)
4165 continue;
4166
4167 init_insn_state(file, &state, sec);
4168 set_func_state(&state.cfi);
4169
4170 warnings += validate_symbol(file, sec, func, &state);
4171 }
4172
4173 return warnings;
4174 }
4175
4176 static int validate_noinstr_sections(struct objtool_file *file)
4177 {
4178 struct section *sec;
4179 int warnings = 0;
4180
4181 sec = find_section_by_name(file->elf, ".noinstr.text");
4182 if (sec) {
4183 warnings += validate_section(file, sec);
4184 warnings += validate_unwind_hints(file, sec);
4185 }
4186
4187 sec = find_section_by_name(file->elf, ".entry.text");
4188 if (sec) {
4189 warnings += validate_section(file, sec);
4190 warnings += validate_unwind_hints(file, sec);
4191 }
4192
4193 sec = find_section_by_name(file->elf, ".cpuidle.text");
4194 if (sec) {
4195 warnings += validate_section(file, sec);
4196 warnings += validate_unwind_hints(file, sec);
4197 }
4198
4199 return warnings;
4200 }
4201
4202 static int validate_functions(struct objtool_file *file)
4203 {
4204 struct section *sec;
4205 int warnings = 0;
4206
4207 for_each_sec(file, sec) {
4208 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4209 continue;
4210
4211 warnings += validate_section(file, sec);
4212 }
4213
4214 return warnings;
4215 }
4216
4217 static void mark_endbr_used(struct instruction *insn)
4218 {
4219 if (!list_empty(&insn->call_node))
4220 list_del_init(&insn->call_node);
4221 }
4222
4223 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4224 {
4225 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4226 struct instruction *first;
4227
4228 if (!sym)
4229 return false;
4230
4231 first = find_insn(file, sym->sec, sym->offset);
4232 if (!first)
4233 return false;
4234
4235 if (first->type != INSN_ENDBR && !first->noendbr)
4236 return false;
4237
4238 return insn->offset == sym->offset + sym->len;
4239 }
4240
/*
 * Check one instruction's relocations for IBT violations: a text
 * reference that doesn't land on an ENDBR, and isn't covered by one of
 * the exemptions below, gets a warning.  ENDBRs that are legitimately
 * referenced are taken off the seal list via mark_endbr_used().
 */
static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
{
	struct instruction *dest;
	struct reloc *reloc;
	unsigned long off;
	int warnings = 0;

	/*
	 * Looking for function pointer load relocations.  Ignore
	 * direct/indirect branches:
	 */
	switch (insn->type) {
	case INSN_CALL:
	case INSN_CALL_DYNAMIC:
	case INSN_JUMP_CONDITIONAL:
	case INSN_JUMP_UNCONDITIONAL:
	case INSN_JUMP_DYNAMIC:
	case INSN_JUMP_DYNAMIC_CONDITIONAL:
	case INSN_RETURN:
	case INSN_NOP:
		return 0;
	default:
		break;
	}

	/*
	 * Iterate over every relocation whose offset falls within this
	 * instruction's bytes -- one instruction can carry several.
	 */
	for (reloc = insn_reloc(file, insn);
	     reloc;
	     reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					      reloc_offset(reloc) + 1,
					      (insn->offset + insn->len) - (reloc_offset(reloc) + 1))) {

		/*
		 * static_call_update() references the trampoline, which
		 * doesn't have (or need) ENDBR.  Skip warning in that case.
		 */
		if (reloc->sym->static_call_tramp)
			continue;

		/* Resolve the target address of the relocation. */
		off = reloc->sym->offset;
		if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
			/* PC-relative: let the arch adjust the addend bias. */
			off += arch_dest_reloc_offset(reloc->addend);
		else
			off += reloc->addend;

		/* Not the start of an instruction: not a branch target. */
		dest = find_insn(file, reloc->sym->sec, off);
		if (!dest)
			continue;

		if (dest->type == INSN_ENDBR) {
			mark_endbr_used(dest);
			continue;
		}

		if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
			/*
			 * Anything from->to self is either _THIS_IP_ or
			 * IRET-to-self.
			 *
			 * There is no sane way to annotate _THIS_IP_ since the
			 * compiler treats the relocation as a constant and is
			 * happy to fold in offsets, skewing any annotation we
			 * do, leading to vast amounts of false-positives.
			 *
			 * There's also compiler generated _THIS_IP_ through
			 * KCOV and such which we have no hope of annotating.
			 *
			 * As such, blanket accept self-references without
			 * issue.
			 */
			continue;
		}

		/*
		 * Accept anything ANNOTATE_NOENDBR.
		 */
		if (dest->noendbr)
			continue;

		/*
		 * Accept if this is the instruction after a symbol
		 * that is (no)endbr -- typical code-range usage.
		 */
		if (noendbr_range(file, dest))
			continue;

		WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));

		warnings++;
	}

	return warnings;
}
4333
4334 static int validate_ibt_data_reloc(struct objtool_file *file,
4335 struct reloc *reloc)
4336 {
4337 struct instruction *dest;
4338
4339 dest = find_insn(file, reloc->sym->sec,
4340 reloc->sym->offset + reloc->addend);
4341 if (!dest)
4342 return 0;
4343
4344 if (dest->type == INSN_ENDBR) {
4345 mark_endbr_used(dest);
4346 return 0;
4347 }
4348
4349 if (dest->noendbr)
4350 return 0;
4351
4352 WARN_FUNC("data relocation to !ENDBR: %s",
4353 reloc->sec->base, reloc_offset(reloc),
4354 offstr(dest->sec, dest->offset));
4355
4356 return 1;
4357 }
4358
/*
 * Validate IBT rules and remove used ENDBR instructions from the seal list.
 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
 * NOPs) later, in create_ibt_endbr_seal_sections().
 */
static int validate_ibt(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;
	int warnings = 0;

	/* Pass 1: code references into text. */
	for_each_insn(file, insn)
		warnings += validate_ibt_insn(file, insn);

	/* Pass 2: data references into text. */
	for_each_sec(file, sec) {

		/* Already done by validate_ibt_insn() */
		if (sec->sh.sh_flags & SHF_EXECINSTR)
			continue;

		/* No relocation section: nothing to check. */
		if (!sec->rsec)
			continue;

		/*
		 * These sections can reference text addresses, but not with
		 * the intent to indirect branch to them.
		 */
		if ((!strncmp(sec->name, ".discard", 8) &&
		     strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
		    !strncmp(sec->name, ".debug", 6) ||
		    !strcmp(sec->name, ".altinstructions") ||
		    !strcmp(sec->name, ".ibt_endbr_seal") ||
		    !strcmp(sec->name, ".orc_unwind_ip") ||
		    !strcmp(sec->name, ".parainstructions") ||
		    !strcmp(sec->name, ".retpoline_sites") ||
		    !strcmp(sec->name, ".smp_locks") ||
		    !strcmp(sec->name, ".static_call_sites") ||
		    !strcmp(sec->name, "_error_injection_whitelist") ||
		    !strcmp(sec->name, "_kprobe_blacklist") ||
		    !strcmp(sec->name, "__bug_table") ||
		    !strcmp(sec->name, "__ex_table") ||
		    !strcmp(sec->name, "__jump_table") ||
		    !strcmp(sec->name, "__mcount_loc") ||
		    !strcmp(sec->name, ".kcfi_traps") ||
		    strstr(sec->name, "__patchable_function_entries"))
			continue;

		for_each_reloc(sec->rsec, reloc)
			warnings += validate_ibt_data_reloc(file, reloc);
	}

	return warnings;
}
4413
4414 static int validate_sls(struct objtool_file *file)
4415 {
4416 struct instruction *insn, *next_insn;
4417 int warnings = 0;
4418
4419 for_each_insn(file, insn) {
4420 next_insn = next_insn_same_sec(file, insn);
4421
4422 if (insn->retpoline_safe)
4423 continue;
4424
4425 switch (insn->type) {
4426 case INSN_RETURN:
4427 if (!next_insn || next_insn->type != INSN_TRAP) {
4428 WARN_INSN(insn, "missing int3 after ret");
4429 warnings++;
4430 }
4431
4432 break;
4433 case INSN_JUMP_DYNAMIC:
4434 if (!next_insn || next_insn->type != INSN_TRAP) {
4435 WARN_INSN(insn, "missing int3 after indirect jump");
4436 warnings++;
4437 }
4438 break;
4439 default:
4440 break;
4441 }
4442 }
4443
4444 return warnings;
4445 }
4446
4447 static bool ignore_noreturn_call(struct instruction *insn)
4448 {
4449 struct symbol *call_dest = insn_call_dest(insn);
4450
4451 /*
4452 * FIXME: hack, we need a real noreturn solution
4453 *
4454 * Problem is, exc_double_fault() may or may not return, depending on
4455 * whether CONFIG_X86_ESPFIX64 is set. But objtool has no visibility
4456 * to the kernel config.
4457 *
4458 * Other potential ways to fix it:
4459 *
4460 * - have compiler communicate __noreturn functions somehow
4461 * - remove CONFIG_X86_ESPFIX64
4462 * - read the .config file
4463 * - add a cmdline option
4464 * - create a generic objtool annotation format (vs a bunch of custom
4465 * formats) and annotate it
4466 */
4467 if (!strcmp(call_dest->name, "exc_double_fault")) {
4468 /* prevent further unreachable warnings for the caller */
4469 insn->sym->warned = 1;
4470 return true;
4471 }
4472
4473 return false;
4474 }
4475
4476 static int validate_reachable_instructions(struct objtool_file *file)
4477 {
4478 struct instruction *insn, *prev_insn;
4479 struct symbol *call_dest;
4480 int warnings = 0;
4481
4482 if (file->ignore_unreachables)
4483 return 0;
4484
4485 for_each_insn(file, insn) {
4486 if (insn->visited || ignore_unreachable_insn(file, insn))
4487 continue;
4488
4489 prev_insn = prev_insn_same_sec(file, insn);
4490 if (prev_insn && prev_insn->dead_end) {
4491 call_dest = insn_call_dest(prev_insn);
4492 if (call_dest && !ignore_noreturn_call(prev_insn)) {
4493 WARN_INSN(insn, "%s() is missing a __noreturn annotation",
4494 call_dest->name);
4495 warnings++;
4496 continue;
4497 }
4498 }
4499
4500 WARN_INSN(insn, "unreachable instruction");
4501 warnings++;
4502 }
4503
4504 return warnings;
4505 }
4506
4507 /* 'funcs' is a space-separated list of function names */
4508 static int disas_funcs(const char *funcs)
4509 {
4510 const char *objdump_str, *cross_compile;
4511 int size, ret;
4512 char *cmd;
4513
4514 cross_compile = getenv("CROSS_COMPILE");
4515
4516 objdump_str = "%sobjdump -wdr %s | gawk -M -v _funcs='%s' '"
4517 "BEGIN { split(_funcs, funcs); }"
4518 "/^$/ { func_match = 0; }"
4519 "/<.*>:/ { "
4520 "f = gensub(/.*<(.*)>:/, \"\\\\1\", 1);"
4521 "for (i in funcs) {"
4522 "if (funcs[i] == f) {"
4523 "func_match = 1;"
4524 "base = strtonum(\"0x\" $1);"
4525 "break;"
4526 "}"
4527 "}"
4528 "}"
4529 "{"
4530 "if (func_match) {"
4531 "addr = strtonum(\"0x\" $1);"
4532 "printf(\"%%04x \", addr - base);"
4533 "print;"
4534 "}"
4535 "}' 1>&2";
4536
4537 /* fake snprintf() to calculate the size */
4538 size = snprintf(NULL, 0, objdump_str, cross_compile, objname, funcs) + 1;
4539 if (size <= 0) {
4540 WARN("objdump string size calculation failed");
4541 return -1;
4542 }
4543
4544 cmd = malloc(size);
4545
4546 /* real snprintf() */
4547 snprintf(cmd, size, objdump_str, cross_compile, objname, funcs);
4548 ret = system(cmd);
4549 if (ret) {
4550 WARN("disassembly failed: %d", ret);
4551 return -1;
4552 }
4553
4554 return 0;
4555 }
4556
4557 static int disas_warned_funcs(struct objtool_file *file)
4558 {
4559 struct symbol *sym;
4560 char *funcs = NULL, *tmp;
4561
4562 for_each_sym(file, sym) {
4563 if (sym->warned) {
4564 if (!funcs) {
4565 funcs = malloc(strlen(sym->name) + 1);
4566 strcpy(funcs, sym->name);
4567 } else {
4568 tmp = malloc(strlen(funcs) + strlen(sym->name) + 2);
4569 sprintf(tmp, "%s %s", funcs, sym->name);
4570 free(funcs);
4571 funcs = tmp;
4572 }
4573 }
4574 }
4575
4576 if (funcs)
4577 disas_funcs(funcs);
4578
4579 return 0;
4580 }
4581
/*
 * Main objtool entry point: set up CFI state, decode the object file,
 * run the configured validation passes, then generate any requested
 * annotation sections.
 *
 * Normally returns 0 regardless of warnings or fatal errors (see the
 * comment at 'out'); a few passes (retpoline, unret, prefix) return
 * their negative error code directly instead.
 */
int check(struct objtool_file *file)
{
	int ret, warnings = 0;

	/* One-time setup of the shared CFI states used during validation. */
	arch_initial_func_cfi_state(&initial_func_cfi);
	init_cfi_state(&init_cfi);
	init_cfi_state(&func_cfi);
	set_func_state(&func_cfi);
	init_cfi_state(&force_undefined_cfi);
	force_undefined_cfi.force_undefined = true;

	if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
		goto out;

	cfi_hash_add(&init_cfi);
	cfi_hash_add(&func_cfi);

	ret = decode_sections(file);
	if (ret < 0)
		goto out;

	warnings += ret;

	/* Nothing decoded: nothing to validate or generate. */
	if (!nr_insns)
		goto out;

	/* --- Validation passes --- */

	if (opts.retpoline) {
		ret = validate_retpoline(file);
		if (ret < 0)
			/* NOTE(review): propagates the error, unlike the 'goto out' paths. */
			return ret;
		warnings += ret;
	}

	if (opts.stackval || opts.orc || opts.uaccess) {
		ret = validate_functions(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		ret = validate_unwind_hints(file, NULL);
		if (ret < 0)
			goto out;
		warnings += ret;

		/* Reachability only checked when everything else was clean. */
		if (!warnings) {
			ret = validate_reachable_instructions(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}

	} else if (opts.noinstr) {
		ret = validate_noinstr_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.unret) {
		/*
		 * Must be after validate_branch() and friends, it plays
		 * further games with insn->visited.
		 */
		ret = validate_unrets(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.ibt) {
		ret = validate_ibt(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.sls) {
		ret = validate_sls(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	/* --- Annotation / section generation passes --- */

	if (opts.static_call) {
		ret = create_static_call_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.retpoline) {
		ret = create_retpoline_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.cfi) {
		ret = create_cfi_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.rethunk) {
		ret = create_return_sites_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;

		if (opts.hack_skylake) {
			ret = create_direct_call_sections(file);
			if (ret < 0)
				goto out;
			warnings += ret;
		}
	}

	if (opts.mcount) {
		ret = create_mcount_loc_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.prefix) {
		ret = add_prefix_symbols(file);
		if (ret < 0)
			return ret;
		warnings += ret;
	}

	if (opts.ibt) {
		/* Runs after validate_ibt() has pruned used ENDBRs from the list. */
		ret = create_ibt_endbr_seal_sections(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.orc && nr_insns) {
		ret = orc_create(file);
		if (ret < 0)
			goto out;
		warnings += ret;
	}

	if (opts.verbose)
		disas_warned_funcs(file);

	if (opts.stats) {
		printf("nr_insns_visited: %ld\n", nr_insns_visited);
		printf("nr_cfi: %ld\n", nr_cfi);
		printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
		printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
	}

out:
	/*
	 *  For now, don't fail the kernel build on fatal warnings.  These
	 *  errors are still fairly common due to the growing matrix of
	 *  supported toolchains and their recent pace of change.
	 */
	return 0;
}