1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
4 */
5
6 #include <string.h>
7 #include <stdlib.h>
8 #include <inttypes.h>
9 #include <sys/mman.h>
10
11 #include <arch/elf.h>
12 #include <objtool/builtin.h>
13 #include <objtool/cfi.h>
14 #include <objtool/arch.h>
15 #include <objtool/check.h>
16 #include <objtool/special.h>
17 #include <objtool/warn.h>
18 #include <objtool/endianness.h>
19
20 #include <linux/objtool_types.h>
21 #include <linux/hashtable.h>
22 #include <linux/kernel.h>
23 #include <linux/static_call_types.h>
24
25 struct alternative {
26 struct alternative *next;
27 struct instruction *insn;
28 bool skip_orig;
29 };
30
31 static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
32
33 static struct cfi_init_state initial_func_cfi;
34 static struct cfi_state init_cfi;
35 static struct cfi_state func_cfi;
36
37 struct instruction *find_insn(struct objtool_file *file,
38 struct section *sec, unsigned long offset)
39 {
40 struct instruction *insn;
41
42 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
43 if (insn->sec == sec && insn->offset == offset)
44 return insn;
45 }
46
47 return NULL;
48 }
49
50 struct instruction *next_insn_same_sec(struct objtool_file *file,
51 struct instruction *insn)
52 {
53 if (insn->idx == INSN_CHUNK_MAX)
54 return find_insn(file, insn->sec, insn->offset + insn->len);
55
56 insn++;
57 if (!insn->len)
58 return NULL;
59
60 return insn;
61 }
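
/*
 * Note: instructions are allocated in contiguous chunks of INSN_CHUNK_SIZE
 * entries (see decode_instructions()), so within a chunk "insn + 1" is the
 * next instruction, and the hash lookup in find_insn() is only needed when
 * crossing a chunk boundary (insn->idx == INSN_CHUNK_MAX).
 */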
62
63 static struct instruction *next_insn_same_func(struct objtool_file *file,
64 struct instruction *insn)
65 {
66 struct instruction *next = next_insn_same_sec(file, insn);
67 struct symbol *func = insn_func(insn);
68
69 if (!func)
70 return NULL;
71
72 if (next && insn_func(next) == func)
73 return next;
74
75 /* Check if we're already in the subfunction: */
76 if (func == func->cfunc)
77 return NULL;
78
79 /* Move to the subfunction: */
80 return find_insn(file, func->cfunc->sec, func->cfunc->offset);
81 }
82
83 static struct instruction *prev_insn_same_sec(struct objtool_file *file,
84 struct instruction *insn)
85 {
86 if (insn->idx == 0) {
87 if (insn->prev_len)
88 return find_insn(file, insn->sec, insn->offset - insn->prev_len);
89 return NULL;
90 }
91
92 return insn - 1;
93 }
94
95 static struct instruction *prev_insn_same_sym(struct objtool_file *file,
96 struct instruction *insn)
97 {
98 struct instruction *prev = prev_insn_same_sec(file, insn);
99
100 if (prev && insn_func(prev) == insn_func(insn))
101 return prev;
102
103 return NULL;
104 }
105
106 #define for_each_insn(file, insn) \
107 for (struct section *__sec, *__fake = (struct section *)1; \
108 __fake; __fake = NULL) \
109 for_each_sec(file, __sec) \
110 sec_for_each_insn(file, __sec, insn)
111
112 #define func_for_each_insn(file, func, insn) \
113 for (insn = find_insn(file, func->sec, func->offset); \
114 insn; \
115 insn = next_insn_same_func(file, insn))
116
117 #define sym_for_each_insn(file, sym, insn) \
118 for (insn = find_insn(file, sym->sec, sym->offset); \
119 insn && insn->offset < sym->offset + sym->len; \
120 insn = next_insn_same_sec(file, insn))
121
122 #define sym_for_each_insn_continue_reverse(file, sym, insn) \
123 for (insn = prev_insn_same_sec(file, insn); \
124 insn && insn->offset >= sym->offset; \
125 insn = prev_insn_same_sec(file, insn))
126
127 #define sec_for_each_insn_from(file, insn) \
128 for (; insn; insn = next_insn_same_sec(file, insn))
129
130 #define sec_for_each_insn_continue(file, insn) \
131 for (insn = next_insn_same_sec(file, insn); insn; \
132 insn = next_insn_same_sec(file, insn))
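
/*
 * Illustrative sketch of the iterators above (the helper itself is
 * hypothetical, not part of objtool): count the decoded instructions
 * belonging to one function.
 */
static inline unsigned long count_func_insns(struct objtool_file *file,
					     struct symbol *func)
{
	struct instruction *insn;
	unsigned long nr = 0;

	/* Walks func, following any .cold subfunction link. */
	func_for_each_insn(file, func, insn)
		nr++;

	return nr;
}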
133
134 static inline struct symbol *insn_call_dest(struct instruction *insn)
135 {
136 if (insn->type == INSN_JUMP_DYNAMIC ||
137 insn->type == INSN_CALL_DYNAMIC)
138 return NULL;
139
140 return insn->_call_dest;
141 }
142
143 static inline struct reloc *insn_jump_table(struct instruction *insn)
144 {
145 if (insn->type == INSN_JUMP_DYNAMIC ||
146 insn->type == INSN_CALL_DYNAMIC)
147 return insn->_jump_table;
148
149 return NULL;
150 }
151
152 static bool is_jump_table_jump(struct instruction *insn)
153 {
154 struct alt_group *alt_group = insn->alt_group;
155
156 if (insn_jump_table(insn))
157 return true;
158
159 /* Retpoline alternative for a jump table? */
160 return alt_group && alt_group->orig_group &&
161 insn_jump_table(alt_group->orig_group->first_insn);
162 }
163
164 static bool is_sibling_call(struct instruction *insn)
165 {
166 /*
167 * Assume only STT_FUNC calls have jump-tables.
168 */
169 if (insn_func(insn)) {
170 /* An indirect jump is either a sibling call or a jump to a table. */
171 if (insn->type == INSN_JUMP_DYNAMIC)
172 return !is_jump_table_jump(insn);
173 }
174
175 /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
176 return (is_static_jump(insn) && insn_call_dest(insn));
177 }
178
179 /*
180 * Check whether the given function is a "noreturn" function.
181 *
182 * For global functions which are outside the scope of this object file, we
183 * have to keep a manual list of them.
184 *
185 * For local functions, we detect this manually by simply looking for the
186 * absence of a return instruction.
187 */
188 static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
189 int recursion)
190 {
191 int i;
192 struct instruction *insn;
193 bool empty = true;
194
195 /*
196 * Unfortunately these have to be hard coded because the noreturn
197 * attribute isn't provided in ELF data. Keep 'em sorted.
198 */
199 static const char * const global_noreturns[] = {
200 "__invalid_creds",
201 "__module_put_and_kthread_exit",
202 "__reiserfs_panic",
203 "__stack_chk_fail",
204 "__ubsan_handle_builtin_unreachable",
205 "arch_call_rest_init",
206 "arch_cpu_idle_dead",
207 "cpu_bringup_and_idle",
208 "cpu_startup_entry",
209 "do_exit",
210 "do_group_exit",
211 "do_task_dead",
212 "ex_handler_msr_mce",
213 "fortify_panic",
214 "kthread_complete_and_exit",
215 "kthread_exit",
216 "kunit_try_catch_throw",
217 "lbug_with_loc",
218 "machine_real_restart",
219 "make_task_dead",
220 "nmi_panic_self_stop",
221 "panic",
222 "panic_smp_self_stop",
223 "rest_init",
224 "rewind_stack_and_make_dead",
225 "sev_es_terminate",
226 "snp_abort",
227 "start_kernel",
228 "stop_this_cpu",
229 "usercopy_abort",
230 "x86_64_start_kernel",
231 "x86_64_start_reservations",
232 "xen_cpu_bringup_again",
233 "xen_start_kernel",
234 };
235
236 if (!func)
237 return false;
238
239 if (func->bind == STB_WEAK)
240 return false;
241
242 if (func->bind == STB_GLOBAL)
243 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
244 if (!strcmp(func->name, global_noreturns[i]))
245 return true;
246
247 if (!func->len)
248 return false;
249
250 insn = find_insn(file, func->sec, func->offset);
251 if (!insn || !insn_func(insn))
252 return false;
253
254 func_for_each_insn(file, func, insn) {
255 empty = false;
256
257 if (insn->type == INSN_RETURN)
258 return false;
259 }
260
261 if (empty)
262 return false;
263
264 /*
265 * A function can have a sibling call instead of a return. In that
266 * case, the function's dead-end status depends on whether the target
267 * of the sibling call returns.
268 */
269 func_for_each_insn(file, func, insn) {
270 if (is_sibling_call(insn)) {
271 struct instruction *dest = insn->jump_dest;
272
273 if (!dest)
274 /* sibling call to another file */
275 return false;
276
277 /* local sibling call */
278 if (recursion == 5) {
279 /*
280 * Infinite recursion: two functions have
281 * sibling calls to each other. This is a very
282 * rare case. It means they aren't dead ends.
283 */
284 return false;
285 }
286
287 return __dead_end_function(file, insn_func(dest), recursion+1);
288 }
289 }
290
291 return true;
292 }
293
294 static bool dead_end_function(struct objtool_file *file, struct symbol *func)
295 {
296 return __dead_end_function(file, func, 0);
297 }
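
/*
 * Example of the local-function case above (sketch): a helper that loops
 * forever or ends in a tail call to a noreturn function has no INSN_RETURN
 * at all; the first walk detects that, and the second walk inherits the
 * dead-end status of any sibling-call target, bounded by the recursion
 * limit of 5.
 */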
298
299 static void init_cfi_state(struct cfi_state *cfi)
300 {
301 int i;
302
303 for (i = 0; i < CFI_NUM_REGS; i++) {
304 cfi->regs[i].base = CFI_UNDEFINED;
305 cfi->vals[i].base = CFI_UNDEFINED;
306 }
307 cfi->cfa.base = CFI_UNDEFINED;
308 cfi->drap_reg = CFI_UNDEFINED;
309 cfi->drap_offset = -1;
310 }
311
312 static void init_insn_state(struct objtool_file *file, struct insn_state *state,
313 struct section *sec)
314 {
315 memset(state, 0, sizeof(*state));
316 init_cfi_state(&state->cfi);
317
318 /*
319 * We need the full vmlinux for noinstr validation, otherwise we can
320 * not correctly determine insn_call_dest(insn)->sec (external symbols
321 * do not have a section).
322 */
323 if (opts.link && opts.noinstr && sec)
324 state->noinstr = sec->noinstr;
325 }
326
327 static struct cfi_state *cfi_alloc(void)
328 {
329 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
330 if (!cfi) {
331 WARN("calloc failed");
332 exit(1);
333 }
334 nr_cfi++;
335 return cfi;
336 }
337
338 static int cfi_bits;
339 static struct hlist_head *cfi_hash;
340
341 static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
342 {
343 return memcmp((void *)cfi1 + sizeof(cfi1->hash),
344 (void *)cfi2 + sizeof(cfi2->hash),
345 sizeof(struct cfi_state) - sizeof(struct hlist_node));
346 }
347
348 static inline u32 cfi_key(struct cfi_state *cfi)
349 {
350 return jhash((void *)cfi + sizeof(cfi->hash),
351 sizeof(*cfi) - sizeof(cfi->hash), 0);
352 }
353
354 static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
355 {
356 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
357 struct cfi_state *obj;
358
359 hlist_for_each_entry(obj, head, hash) {
360 if (!cficmp(cfi, obj)) {
361 nr_cfi_cache++;
362 return obj;
363 }
364 }
365
366 obj = cfi_alloc();
367 *obj = *cfi;
368 hlist_add_head(&obj->hash, head);
369
370 return obj;
371 }
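
/*
 * The above is classic hash-consing: cfi_key()/cficmp() deliberately skip
 * the embedded hlist node (which must stay the first member of struct
 * cfi_state) so that only the CFI payload is hashed and compared, and
 * cfi_hash_find_or_add() returns the canonical interned copy.
 */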
372
373 static void cfi_hash_add(struct cfi_state *cfi)
374 {
375 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
376
377 hlist_add_head(&cfi->hash, head);
378 }
379
380 static void *cfi_hash_alloc(unsigned long size)
381 {
382 cfi_bits = max(10, ilog2(size));
383 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
384 PROT_READ|PROT_WRITE,
385 MAP_PRIVATE|MAP_ANON, -1, 0);
386 if (cfi_hash == (void *)-1L) {
387 WARN("mmap fail cfi_hash");
388 cfi_hash = NULL;
389 } else if (opts.stats) {
390 printf("cfi_bits: %d\n", cfi_bits);
391 }
392
393 return cfi_hash;
394 }
395
396 static unsigned long nr_insns;
397 static unsigned long nr_insns_visited;
398
399 /*
400 * Call the arch-specific instruction decoder for all the instructions and add
401 * them to the global instruction list.
402 */
403 static int decode_instructions(struct objtool_file *file)
404 {
405 struct section *sec;
406 struct symbol *func;
407 unsigned long offset;
408 struct instruction *insn;
409 int ret;
410
411 for_each_sec(file, sec) {
412 struct instruction *insns = NULL;
413 u8 prev_len = 0;
414 u8 idx = 0;
415
416 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
417 continue;
418
419 if (strcmp(sec->name, ".altinstr_replacement") &&
420 strcmp(sec->name, ".altinstr_aux") &&
421 strncmp(sec->name, ".discard.", 9))
422 sec->text = true;
423
424 if (!strcmp(sec->name, ".noinstr.text") ||
425 !strcmp(sec->name, ".entry.text") ||
426 !strcmp(sec->name, ".cpuidle.text") ||
427 !strncmp(sec->name, ".text.__x86.", 12))
428 sec->noinstr = true;
429
430 /*
431 * .init.text code is run before userspace and thus doesn't
432 * strictly need retpolines, except for modules: they are
433 * loaded late and very much do need retpolines in their
434 * .init.text.
435 */
436 if (!strcmp(sec->name, ".init.text") && !opts.module)
437 sec->init = true;
438
439 for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
440 if (!insns || idx == INSN_CHUNK_MAX) {
441 insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
442 if (!insns) {
443 WARN("calloc failed");
444 return -1;
445 }
446 idx = 0;
447 } else {
448 idx++;
449 }
450 insn = &insns[idx];
451 insn->idx = idx;
452
453 INIT_LIST_HEAD(&insn->call_node);
454 insn->sec = sec;
455 insn->offset = offset;
456 insn->prev_len = prev_len;
457
458 ret = arch_decode_instruction(file, sec, offset,
459 sec->sh.sh_size - offset,
460 insn);
461 if (ret)
462 return ret;
463
464 prev_len = insn->len;
465
466 /*
467 * By default, "ud2" is a dead end unless otherwise
468 * annotated, because GCC 7 inserts it for certain
469 * divide-by-zero cases.
470 */
471 if (insn->type == INSN_BUG)
472 insn->dead_end = true;
473
474 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
475 nr_insns++;
476 }
477
478 // printf("%s: last chunk used: %d\n", sec->name, (int)idx);
479
480 sec_for_each_sym(sec, func) {
481 if (func->type != STT_NOTYPE && func->type != STT_FUNC)
482 continue;
483
484 if (func->offset == sec->sh.sh_size) {
485 /* Heuristic: likely an "end" symbol */
486 if (func->type == STT_NOTYPE)
487 continue;
488 WARN("%s(): STT_FUNC at end of section",
489 func->name);
490 return -1;
491 }
492
493 if (func->return_thunk || func->alias != func)
494 continue;
495
496 if (!find_insn(file, sec, func->offset)) {
497 WARN("%s(): can't find starting instruction",
498 func->name);
499 return -1;
500 }
501
502 sym_for_each_insn(file, func, insn) {
503 insn->sym = func;
504 if (func->type == STT_FUNC &&
505 insn->type == INSN_ENDBR &&
506 list_empty(&insn->call_node)) {
507 if (insn->offset == func->offset) {
508 list_add_tail(&insn->call_node, &file->endbr_list);
509 file->nr_endbr++;
510 } else {
511 file->nr_endbr_int++;
512 }
513 }
514 }
515 }
516 }
517
518 if (opts.stats)
519 printf("nr_insns: %lu\n", nr_insns);
520
521 return 0;
522 }
523
524 /*
525 * Read the pv_ops[] .data table to find the statically initialized values.
526 */
527 static int add_pv_ops(struct objtool_file *file, const char *symname)
528 {
529 struct symbol *sym, *func;
530 unsigned long off, end;
531 struct reloc *rel;
532 int idx;
533
534 sym = find_symbol_by_name(file->elf, symname);
535 if (!sym)
536 return 0;
537
538 off = sym->offset;
539 end = off + sym->len;
540 for (;;) {
541 rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
542 if (!rel)
543 break;
544
545 func = rel->sym;
546 if (func->type == STT_SECTION)
547 func = find_symbol_by_offset(rel->sym->sec, rel->addend);
548
549 idx = (rel->offset - sym->offset) / sizeof(unsigned long);
550
551 objtool_pv_add(file, idx, func);
552
553 off = rel->offset + 1;
554 if (off > end)
555 break;
556 }
557
558 return 0;
559 }
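
/*
 * Sketch of what add_pv_ops() consumes (simplified; the real table is in
 * arch/x86/kernel/paravirt.c):
 *
 *	struct paravirt_patch_template pv_ops = {
 *		.mmu.flush_tlb_user = native_flush_tlb_local,
 *		...
 *	};
 *
 * Each statically initialized slot carries a relocation inside the pv_ops
 * object; its offset divided by the pointer size yields the slot index
 * passed to objtool_pv_add().
 */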
560
561 /*
562 * Allocate and initialize file->pv_ops[].
563 */
564 static int init_pv_ops(struct objtool_file *file)
565 {
566 static const char *pv_ops_tables[] = {
567 "pv_ops",
568 "xen_cpu_ops",
569 "xen_irq_ops",
570 "xen_mmu_ops",
571 NULL,
572 };
573 const char *pv_ops;
574 struct symbol *sym;
575 int idx, nr;
576
577 if (!opts.noinstr)
578 return 0;
579
580 file->pv_ops = NULL;
581
582 sym = find_symbol_by_name(file->elf, "pv_ops");
583 if (!sym)
584 return 0;
585
586 nr = sym->len / sizeof(unsigned long);
587 file->pv_ops = calloc(sizeof(struct pv_state), nr);
588 if (!file->pv_ops)
589 return -1;
590
591 for (idx = 0; idx < nr; idx++)
592 INIT_LIST_HEAD(&file->pv_ops[idx].targets);
593
594 for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
595 add_pv_ops(file, pv_ops);
596
597 return 0;
598 }
599
600 static struct instruction *find_last_insn(struct objtool_file *file,
601 struct section *sec)
602 {
603 struct instruction *insn = NULL;
604 unsigned int offset;
605 unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
606
607 for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
608 insn = find_insn(file, sec, offset);
609
610 return insn;
611 }
612
613 /*
614 * Mark "ud2" instructions and manually annotated dead ends.
615 */
616 static int add_dead_ends(struct objtool_file *file)
617 {
618 struct section *sec;
619 struct reloc *reloc;
620 struct instruction *insn;
621
622 /*
623 * Check for manually annotated dead ends.
624 */
625 sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
626 if (!sec)
627 goto reachable;
628
629 list_for_each_entry(reloc, &sec->reloc_list, list) {
630 if (reloc->sym->type != STT_SECTION) {
631 WARN("unexpected relocation symbol type in %s", sec->name);
632 return -1;
633 }
634 insn = find_insn(file, reloc->sym->sec, reloc->addend);
635 if (insn)
636 insn = prev_insn_same_sec(file, insn);
637 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
638 insn = find_last_insn(file, reloc->sym->sec);
639 if (!insn) {
640 WARN("can't find unreachable insn at %s+0x%" PRIx64,
641 reloc->sym->sec->name, reloc->addend);
642 return -1;
643 }
644 } else {
645 WARN("can't find unreachable insn at %s+0x%" PRIx64,
646 reloc->sym->sec->name, reloc->addend);
647 return -1;
648 }
649
650 insn->dead_end = true;
651 }
652
653 reachable:
654 /*
655 * These manually annotated reachable checks are needed for GCC 4.4,
656 * where the Linux unreachable() macro isn't supported. In that case
657 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
658 * not a dead end.
659 */
660 sec = find_section_by_name(file->elf, ".rela.discard.reachable");
661 if (!sec)
662 return 0;
663
664 list_for_each_entry(reloc, &sec->reloc_list, list) {
665 if (reloc->sym->type != STT_SECTION) {
666 WARN("unexpected relocation symbol type in %s", sec->name);
667 return -1;
668 }
669 insn = find_insn(file, reloc->sym->sec, reloc->addend);
670 if (insn)
671 insn = prev_insn_same_sec(file, insn);
672 else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
673 insn = find_last_insn(file, reloc->sym->sec);
674 if (!insn) {
675 WARN("can't find reachable insn at %s+0x%" PRIx64,
676 reloc->sym->sec->name, reloc->addend);
677 return -1;
678 }
679 } else {
680 WARN("can't find reachable insn at %s+0x%" PRIx64,
681 reloc->sym->sec->name, reloc->addend);
682 return -1;
683 }
684
685 insn->dead_end = false;
686 }
687
688 return 0;
689 }
690
691 static int create_static_call_sections(struct objtool_file *file)
692 {
693 struct section *sec;
694 struct static_call_site *site;
695 struct instruction *insn;
696 struct symbol *key_sym;
697 char *key_name, *tmp;
698 int idx;
699
700 sec = find_section_by_name(file->elf, ".static_call_sites");
701 if (sec) {
702 INIT_LIST_HEAD(&file->static_call_list);
703 WARN("file already has .static_call_sites section, skipping");
704 return 0;
705 }
706
707 if (list_empty(&file->static_call_list))
708 return 0;
709
710 idx = 0;
711 list_for_each_entry(insn, &file->static_call_list, call_node)
712 idx++;
713
714 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
715 sizeof(struct static_call_site), idx);
716 if (!sec)
717 return -1;
718
719 idx = 0;
720 list_for_each_entry(insn, &file->static_call_list, call_node) {
721
722 site = (struct static_call_site *)sec->data->d_buf + idx;
723 memset(site, 0, sizeof(struct static_call_site));
724
725 /* populate reloc for 'addr' */
726 if (elf_add_reloc_to_insn(file->elf, sec,
727 idx * sizeof(struct static_call_site),
728 R_X86_64_PC32,
729 insn->sec, insn->offset))
730 return -1;
731
732 /* find key symbol */
733 key_name = strdup(insn_call_dest(insn)->name);
734 if (!key_name) {
735 perror("strdup");
736 return -1;
737 }
738 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
739 STATIC_CALL_TRAMP_PREFIX_LEN)) {
740 WARN("static_call: trampoline name malformed: %s", key_name);
741 free(key_name);
742 return -1;
743 }
744 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
745 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
746
747 key_sym = find_symbol_by_name(file->elf, tmp);
748 if (!key_sym) {
749 if (!opts.module) {
750 WARN("static_call: can't find static_call_key symbol: %s", tmp);
751 free(key_name);
752 return -1;
753 }
754
755 /*
756 * For modules, the key might not be exported, which
757 * means the module can make static calls but isn't
758 * allowed to change them.
759 *
760 * In that case we temporarily set the key to be the
761 * trampoline address. This is fixed up in
762 * static_call_add_module().
763 */
764 key_sym = insn_call_dest(insn);
765 }
766 free(key_name);
767
768 /* populate reloc for 'key' */
769 if (elf_add_reloc(file->elf, sec,
770 idx * sizeof(struct static_call_site) + 4,
771 R_X86_64_PC32, key_sym,
772 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
773 return -1;
774
775 idx++;
776 }
777
778 return 0;
779 }
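
/*
 * Example of the prefix rewrite above, assuming the standard prefixes from
 * static_call_types.h ("__SCT__" for trampolines, "__SCK__" for keys): a
 * call to the trampoline __SCT__preempt_schedule is keyed by the symbol
 * __SCK__preempt_schedule, produced by overwriting the prefix in place.
 */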
780
781 static int create_retpoline_sites_sections(struct objtool_file *file)
782 {
783 struct instruction *insn;
784 struct section *sec;
785 int idx;
786
787 sec = find_section_by_name(file->elf, ".retpoline_sites");
788 if (sec) {
789 WARN("file already has .retpoline_sites, skipping");
790 return 0;
791 }
792
793 idx = 0;
794 list_for_each_entry(insn, &file->retpoline_call_list, call_node)
795 idx++;
796
797 if (!idx)
798 return 0;
799
800 sec = elf_create_section(file->elf, ".retpoline_sites", 0,
801 sizeof(int), idx);
802 if (!sec) {
803 WARN("elf_create_section: .retpoline_sites");
804 return -1;
805 }
806
807 idx = 0;
808 list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
809
810 int *site = (int *)sec->data->d_buf + idx;
811 *site = 0;
812
813 if (elf_add_reloc_to_insn(file->elf, sec,
814 idx * sizeof(int),
815 R_X86_64_PC32,
816 insn->sec, insn->offset)) {
817 WARN("elf_add_reloc_to_insn: .retpoline_sites");
818 return -1;
819 }
820
821 idx++;
822 }
823
824 return 0;
825 }
826
827 static int create_return_sites_sections(struct objtool_file *file)
828 {
829 struct instruction *insn;
830 struct section *sec;
831 int idx;
832
833 sec = find_section_by_name(file->elf, ".return_sites");
834 if (sec) {
835 WARN("file already has .return_sites, skipping");
836 return 0;
837 }
838
839 idx = 0;
840 list_for_each_entry(insn, &file->return_thunk_list, call_node)
841 idx++;
842
843 if (!idx)
844 return 0;
845
846 sec = elf_create_section(file->elf, ".return_sites", 0,
847 sizeof(int), idx);
848 if (!sec) {
849 WARN("elf_create_section: .return_sites");
850 return -1;
851 }
852
853 idx = 0;
854 list_for_each_entry(insn, &file->return_thunk_list, call_node) {
855
856 int *site = (int *)sec->data->d_buf + idx;
857 *site = 0;
858
859 if (elf_add_reloc_to_insn(file->elf, sec,
860 idx * sizeof(int),
861 R_X86_64_PC32,
862 insn->sec, insn->offset)) {
863 WARN("elf_add_reloc_to_insn: .return_sites");
864 return -1;
865 }
866
867 idx++;
868 }
869
870 return 0;
871 }
872
873 static int create_ibt_endbr_seal_sections(struct objtool_file *file)
874 {
875 struct instruction *insn;
876 struct section *sec;
877 int idx;
878
879 sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
880 if (sec) {
881 WARN("file already has .ibt_endbr_seal, skipping");
882 return 0;
883 }
884
885 idx = 0;
886 list_for_each_entry(insn, &file->endbr_list, call_node)
887 idx++;
888
889 if (opts.stats) {
890 printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
891 printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
892 printf("ibt: superfluous ENDBR: %d\n", idx);
893 }
894
895 if (!idx)
896 return 0;
897
898 sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
899 sizeof(int), idx);
900 if (!sec) {
901 WARN("elf_create_section: .ibt_endbr_seal");
902 return -1;
903 }
904
905 idx = 0;
906 list_for_each_entry(insn, &file->endbr_list, call_node) {
907
908 int *site = (int *)sec->data->d_buf + idx;
909 struct symbol *sym = insn->sym;
910 *site = 0;
911
912 if (opts.module && sym && sym->type == STT_FUNC &&
913 insn->offset == sym->offset &&
914 (!strcmp(sym->name, "init_module") ||
915 !strcmp(sym->name, "cleanup_module")))
916 WARN("%s(): not an indirect call target", sym->name);
917
918 if (elf_add_reloc_to_insn(file->elf, sec,
919 idx * sizeof(int),
920 R_X86_64_PC32,
921 insn->sec, insn->offset)) {
922 WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
923 return -1;
924 }
925
926 idx++;
927 }
928
929 return 0;
930 }
931
932 static int create_cfi_sections(struct objtool_file *file)
933 {
934 struct section *sec;
935 struct symbol *sym;
936 unsigned int *loc;
937 int idx;
938
939 sec = find_section_by_name(file->elf, ".cfi_sites");
940 if (sec) {
941 INIT_LIST_HEAD(&file->call_list);
942 WARN("file already has .cfi_sites section, skipping");
943 return 0;
944 }
945
946 idx = 0;
947 for_each_sym(file, sym) {
948 if (sym->type != STT_FUNC)
949 continue;
950
951 if (strncmp(sym->name, "__cfi_", 6))
952 continue;
953
954 idx++;
955 }
956
957 sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
958 if (!sec)
959 return -1;
960
961 idx = 0;
962 for_each_sym(file, sym) {
963 if (sym->type != STT_FUNC)
964 continue;
965
966 if (strncmp(sym->name, "__cfi_", 6))
967 continue;
968
969 loc = (unsigned int *)sec->data->d_buf + idx;
970 memset(loc, 0, sizeof(unsigned int));
971
972 if (elf_add_reloc_to_insn(file->elf, sec,
973 idx * sizeof(unsigned int),
974 R_X86_64_PC32,
975 sym->sec, sym->offset))
976 return -1;
977
978 idx++;
979 }
980
981 return 0;
982 }
983
984 static int create_mcount_loc_sections(struct objtool_file *file)
985 {
986 int addrsize = elf_class_addrsize(file->elf);
987 struct instruction *insn;
988 struct section *sec;
989 int idx;
990
991 sec = find_section_by_name(file->elf, "__mcount_loc");
992 if (sec) {
993 INIT_LIST_HEAD(&file->mcount_loc_list);
994 WARN("file already has __mcount_loc section, skipping");
995 return 0;
996 }
997
998 if (list_empty(&file->mcount_loc_list))
999 return 0;
1000
1001 idx = 0;
1002 list_for_each_entry(insn, &file->mcount_loc_list, call_node)
1003 idx++;
1004
1005 sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
1006 if (!sec)
1007 return -1;
1008
1009 sec->sh.sh_addralign = addrsize;
1010
1011 idx = 0;
1012 list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
1013 void *loc;
1014
1015 loc = sec->data->d_buf + idx;
1016 memset(loc, 0, addrsize);
1017
1018 if (elf_add_reloc_to_insn(file->elf, sec, idx,
1019 addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
1020 insn->sec, insn->offset))
1021 return -1;
1022
1023 idx += addrsize;
1024 }
1025
1026 return 0;
1027 }
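
/*
 * The __mcount_loc table built above is what ftrace consumes: one
 * address-sized, absolutely relocated slot per __fentry__/mcount call
 * site, later sorted and turned into struct dyn_ftrace records.
 */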
1028
1029 static int create_direct_call_sections(struct objtool_file *file)
1030 {
1031 struct instruction *insn;
1032 struct section *sec;
1033 unsigned int *loc;
1034 int idx;
1035
1036 sec = find_section_by_name(file->elf, ".call_sites");
1037 if (sec) {
1038 INIT_LIST_HEAD(&file->call_list);
1039 WARN("file already has .call_sites section, skipping");
1040 return 0;
1041 }
1042
1043 if (list_empty(&file->call_list))
1044 return 0;
1045
1046 idx = 0;
1047 list_for_each_entry(insn, &file->call_list, call_node)
1048 idx++;
1049
1050 sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
1051 if (!sec)
1052 return -1;
1053
1054 idx = 0;
1055 list_for_each_entry(insn, &file->call_list, call_node) {
1056
1057 loc = (unsigned int *)sec->data->d_buf + idx;
1058 memset(loc, 0, sizeof(unsigned int));
1059
1060 if (elf_add_reloc_to_insn(file->elf, sec,
1061 idx * sizeof(unsigned int),
1062 R_X86_64_PC32,
1063 insn->sec, insn->offset))
1064 return -1;
1065
1066 idx++;
1067 }
1068
1069 return 0;
1070 }
1071
1072 /*
1073 * Warnings shouldn't be reported for ignored functions.
1074 */
1075 static void add_ignores(struct objtool_file *file)
1076 {
1077 struct instruction *insn;
1078 struct section *sec;
1079 struct symbol *func;
1080 struct reloc *reloc;
1081
1082 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
1083 if (!sec)
1084 return;
1085
1086 list_for_each_entry(reloc, &sec->reloc_list, list) {
1087 switch (reloc->sym->type) {
1088 case STT_FUNC:
1089 func = reloc->sym;
1090 break;
1091
1092 case STT_SECTION:
1093 func = find_func_by_offset(reloc->sym->sec, reloc->addend);
1094 if (!func)
1095 continue;
1096 break;
1097
1098 default:
1099 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
1100 continue;
1101 }
1102
1103 func_for_each_insn(file, func, insn)
1104 insn->ignore = true;
1105 }
1106 }
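
/*
 * The .discard.func_stack_frame_non_standard entries consumed above are
 * emitted by the STACK_FRAME_NON_STANDARD() annotation from
 * <linux/objtool.h>, e.g. (symbol chosen purely for illustration):
 *
 *	STACK_FRAME_NON_STANDARD(machine_real_restart);
 */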
1107
1108 /*
1109 * This is a whitelist of functions that are allowed to be called with AC set.
1110 * The list is meant to be minimal and only contains compiler instrumentation
1111 * ABI and a few functions used to implement *_{to,from}_user() functions.
1112 *
1113 * These functions must not directly change AC, but may PUSHF/POPF.
1114 */
1115 static const char *uaccess_safe_builtin[] = {
1116 /* KASAN */
1117 "kasan_report",
1118 "kasan_check_range",
1119 /* KASAN out-of-line */
1120 "__asan_loadN_noabort",
1121 "__asan_load1_noabort",
1122 "__asan_load2_noabort",
1123 "__asan_load4_noabort",
1124 "__asan_load8_noabort",
1125 "__asan_load16_noabort",
1126 "__asan_storeN_noabort",
1127 "__asan_store1_noabort",
1128 "__asan_store2_noabort",
1129 "__asan_store4_noabort",
1130 "__asan_store8_noabort",
1131 "__asan_store16_noabort",
1132 "__kasan_check_read",
1133 "__kasan_check_write",
1134 /* KASAN in-line */
1135 "__asan_report_load_n_noabort",
1136 "__asan_report_load1_noabort",
1137 "__asan_report_load2_noabort",
1138 "__asan_report_load4_noabort",
1139 "__asan_report_load8_noabort",
1140 "__asan_report_load16_noabort",
1141 "__asan_report_store_n_noabort",
1142 "__asan_report_store1_noabort",
1143 "__asan_report_store2_noabort",
1144 "__asan_report_store4_noabort",
1145 "__asan_report_store8_noabort",
1146 "__asan_report_store16_noabort",
1147 /* KCSAN */
1148 "__kcsan_check_access",
1149 "__kcsan_mb",
1150 "__kcsan_wmb",
1151 "__kcsan_rmb",
1152 "__kcsan_release",
1153 "kcsan_found_watchpoint",
1154 "kcsan_setup_watchpoint",
1155 "kcsan_check_scoped_accesses",
1156 "kcsan_disable_current",
1157 "kcsan_enable_current_nowarn",
1158 /* KCSAN/TSAN */
1159 "__tsan_func_entry",
1160 "__tsan_func_exit",
1161 "__tsan_read_range",
1162 "__tsan_write_range",
1163 "__tsan_read1",
1164 "__tsan_read2",
1165 "__tsan_read4",
1166 "__tsan_read8",
1167 "__tsan_read16",
1168 "__tsan_write1",
1169 "__tsan_write2",
1170 "__tsan_write4",
1171 "__tsan_write8",
1172 "__tsan_write16",
1173 "__tsan_read_write1",
1174 "__tsan_read_write2",
1175 "__tsan_read_write4",
1176 "__tsan_read_write8",
1177 "__tsan_read_write16",
1178 "__tsan_volatile_read1",
1179 "__tsan_volatile_read2",
1180 "__tsan_volatile_read4",
1181 "__tsan_volatile_read8",
1182 "__tsan_volatile_read16",
1183 "__tsan_volatile_write1",
1184 "__tsan_volatile_write2",
1185 "__tsan_volatile_write4",
1186 "__tsan_volatile_write8",
1187 "__tsan_volatile_write16",
1188 "__tsan_atomic8_load",
1189 "__tsan_atomic16_load",
1190 "__tsan_atomic32_load",
1191 "__tsan_atomic64_load",
1192 "__tsan_atomic8_store",
1193 "__tsan_atomic16_store",
1194 "__tsan_atomic32_store",
1195 "__tsan_atomic64_store",
1196 "__tsan_atomic8_exchange",
1197 "__tsan_atomic16_exchange",
1198 "__tsan_atomic32_exchange",
1199 "__tsan_atomic64_exchange",
1200 "__tsan_atomic8_fetch_add",
1201 "__tsan_atomic16_fetch_add",
1202 "__tsan_atomic32_fetch_add",
1203 "__tsan_atomic64_fetch_add",
1204 "__tsan_atomic8_fetch_sub",
1205 "__tsan_atomic16_fetch_sub",
1206 "__tsan_atomic32_fetch_sub",
1207 "__tsan_atomic64_fetch_sub",
1208 "__tsan_atomic8_fetch_and",
1209 "__tsan_atomic16_fetch_and",
1210 "__tsan_atomic32_fetch_and",
1211 "__tsan_atomic64_fetch_and",
1212 "__tsan_atomic8_fetch_or",
1213 "__tsan_atomic16_fetch_or",
1214 "__tsan_atomic32_fetch_or",
1215 "__tsan_atomic64_fetch_or",
1216 "__tsan_atomic8_fetch_xor",
1217 "__tsan_atomic16_fetch_xor",
1218 "__tsan_atomic32_fetch_xor",
1219 "__tsan_atomic64_fetch_xor",
1220 "__tsan_atomic8_fetch_nand",
1221 "__tsan_atomic16_fetch_nand",
1222 "__tsan_atomic32_fetch_nand",
1223 "__tsan_atomic64_fetch_nand",
1224 "__tsan_atomic8_compare_exchange_strong",
1225 "__tsan_atomic16_compare_exchange_strong",
1226 "__tsan_atomic32_compare_exchange_strong",
1227 "__tsan_atomic64_compare_exchange_strong",
1228 "__tsan_atomic8_compare_exchange_weak",
1229 "__tsan_atomic16_compare_exchange_weak",
1230 "__tsan_atomic32_compare_exchange_weak",
1231 "__tsan_atomic64_compare_exchange_weak",
1232 "__tsan_atomic8_compare_exchange_val",
1233 "__tsan_atomic16_compare_exchange_val",
1234 "__tsan_atomic32_compare_exchange_val",
1235 "__tsan_atomic64_compare_exchange_val",
1236 "__tsan_atomic_thread_fence",
1237 "__tsan_atomic_signal_fence",
1238 "__tsan_unaligned_read16",
1239 "__tsan_unaligned_write16",
1240 /* KCOV */
1241 "write_comp_data",
1242 "check_kcov_mode",
1243 "__sanitizer_cov_trace_pc",
1244 "__sanitizer_cov_trace_const_cmp1",
1245 "__sanitizer_cov_trace_const_cmp2",
1246 "__sanitizer_cov_trace_const_cmp4",
1247 "__sanitizer_cov_trace_const_cmp8",
1248 "__sanitizer_cov_trace_cmp1",
1249 "__sanitizer_cov_trace_cmp2",
1250 "__sanitizer_cov_trace_cmp4",
1251 "__sanitizer_cov_trace_cmp8",
1252 "__sanitizer_cov_trace_switch",
1253 /* KMSAN */
1254 "kmsan_copy_to_user",
1255 "kmsan_report",
1256 "kmsan_unpoison_entry_regs",
1257 "kmsan_unpoison_memory",
1258 "__msan_chain_origin",
1259 "__msan_get_context_state",
1260 "__msan_instrument_asm_store",
1261 "__msan_metadata_ptr_for_load_1",
1262 "__msan_metadata_ptr_for_load_2",
1263 "__msan_metadata_ptr_for_load_4",
1264 "__msan_metadata_ptr_for_load_8",
1265 "__msan_metadata_ptr_for_load_n",
1266 "__msan_metadata_ptr_for_store_1",
1267 "__msan_metadata_ptr_for_store_2",
1268 "__msan_metadata_ptr_for_store_4",
1269 "__msan_metadata_ptr_for_store_8",
1270 "__msan_metadata_ptr_for_store_n",
1271 "__msan_poison_alloca",
1272 "__msan_warning",
1273 /* UBSAN */
1274 "ubsan_type_mismatch_common",
1275 "__ubsan_handle_type_mismatch",
1276 "__ubsan_handle_type_mismatch_v1",
1277 "__ubsan_handle_shift_out_of_bounds",
1278 "__ubsan_handle_load_invalid_value",
1279 /* STACKLEAK */
1280 "stackleak_track_stack",
1281 /* misc */
1282 "csum_partial_copy_generic",
1283 "copy_mc_fragile",
1284 "copy_mc_fragile_handle_tail",
1285 "copy_mc_enhanced_fast_string",
1286 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
1287 "clear_user_erms",
1288 "clear_user_rep_good",
1289 "clear_user_original",
1290 NULL
1291 };
1292
1293 static void add_uaccess_safe(struct objtool_file *file)
1294 {
1295 struct symbol *func;
1296 const char **name;
1297
1298 if (!opts.uaccess)
1299 return;
1300
1301 for (name = uaccess_safe_builtin; *name; name++) {
1302 func = find_symbol_by_name(file->elf, *name);
1303 if (!func)
1304 continue;
1305
1306 func->uaccess_safe = true;
1307 }
1308 }
1309
1310 /*
1311 * FIXME: For now, just ignore any alternatives which add retpolines. This is
1312 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
1313 * But it at least allows objtool to understand the control flow *around* the
1314 * retpoline.
1315 */
1316 static int add_ignore_alternatives(struct objtool_file *file)
1317 {
1318 struct section *sec;
1319 struct reloc *reloc;
1320 struct instruction *insn;
1321
1322 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
1323 if (!sec)
1324 return 0;
1325
1326 list_for_each_entry(reloc, &sec->reloc_list, list) {
1327 if (reloc->sym->type != STT_SECTION) {
1328 WARN("unexpected relocation symbol type in %s", sec->name);
1329 return -1;
1330 }
1331
1332 insn = find_insn(file, reloc->sym->sec, reloc->addend);
1333 if (!insn) {
1334 WARN("bad .discard.ignore_alts entry");
1335 return -1;
1336 }
1337
1338 insn->ignore_alts = true;
1339 }
1340
1341 return 0;
1342 }
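
/*
 * The .discard.ignore_alts entries read above come from the arch's
 * ANNOTATE_IGNORE_ALTERNATIVE asm macro (see
 * arch/x86/include/asm/alternative.h), which records the address of the
 * following instruction into that section.
 */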
1343
1344 __weak bool arch_is_retpoline(struct symbol *sym)
1345 {
1346 return false;
1347 }
1348
1349 __weak bool arch_is_rethunk(struct symbol *sym)
1350 {
1351 return false;
1352 }
1353
1354 static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
1355 {
1356 struct reloc *reloc;
1357
1358 if (insn->no_reloc)
1359 return NULL;
1360
1361 if (!file)
1362 return NULL;
1363
1364 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
1365 insn->offset, insn->len);
1366 if (!reloc) {
1367 insn->no_reloc = 1;
1368 return NULL;
1369 }
1370
1371 return reloc;
1372 }
1373
1374 static void remove_insn_ops(struct instruction *insn)
1375 {
1376 struct stack_op *op, *next;
1377
1378 for (op = insn->stack_ops; op; op = next) {
1379 next = op->next;
1380 free(op);
1381 }
1382 insn->stack_ops = NULL;
1383 }
1384
1385 static void annotate_call_site(struct objtool_file *file,
1386 struct instruction *insn, bool sibling)
1387 {
1388 struct reloc *reloc = insn_reloc(file, insn);
1389 struct symbol *sym = insn_call_dest(insn);
1390
1391 if (!sym)
1392 sym = reloc->sym;
1393
1394 /*
1395 * Alternative replacement code is just template code which is
1396 * sometimes copied to the original instruction. For now, don't
1397 * annotate it. (In the future we might consider annotating the
1398 * original instruction if/when it ever makes sense to do so.)
1399 */
1400 if (!strcmp(insn->sec->name, ".altinstr_replacement"))
1401 return;
1402
1403 if (sym->static_call_tramp) {
1404 list_add_tail(&insn->call_node, &file->static_call_list);
1405 return;
1406 }
1407
1408 if (sym->retpoline_thunk) {
1409 list_add_tail(&insn->call_node, &file->retpoline_call_list);
1410 return;
1411 }
1412
1413 /*
1414 * Many compilers cannot disable KCOV or sanitizer calls with a function
1415 * attribute, so they need a little help: NOP out any such calls in
1416 * noinstr text.
1417 */
1418 if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
1419 if (reloc) {
1420 reloc->type = R_NONE;
1421 elf_write_reloc(file->elf, reloc);
1422 }
1423
1424 elf_write_insn(file->elf, insn->sec,
1425 insn->offset, insn->len,
1426 sibling ? arch_ret_insn(insn->len)
1427 : arch_nop_insn(insn->len));
1428
1429 insn->type = sibling ? INSN_RETURN : INSN_NOP;
1430
1431 if (sibling) {
1432 /*
1433 * We've replaced the tail-call JMP insn with two new
1434 * insns: RET; INT3, except we only have a single struct
1435 * insn here. Mark it retpoline_safe to avoid the SLS
1436 * warning, instead of adding another insn.
1437 */
1438 insn->retpoline_safe = true;
1439 }
1440
1441 return;
1442 }
1443
1444 if (opts.mcount && sym->fentry) {
1445 if (sibling)
1446 WARN_INSN(insn, "tail call to __fentry__ !?!?");
1447 if (opts.mnop) {
1448 if (reloc) {
1449 reloc->type = R_NONE;
1450 elf_write_reloc(file->elf, reloc);
1451 }
1452
1453 elf_write_insn(file->elf, insn->sec,
1454 insn->offset, insn->len,
1455 arch_nop_insn(insn->len));
1456
1457 insn->type = INSN_NOP;
1458 }
1459
1460 list_add_tail(&insn->call_node, &file->mcount_loc_list);
1461 return;
1462 }
1463
1464 if (insn->type == INSN_CALL && !insn->sec->init)
1465 list_add_tail(&insn->call_node, &file->call_list);
1466
1467 if (!sibling && dead_end_function(file, sym))
1468 insn->dead_end = true;
1469 }
1470
1471 static void add_call_dest(struct objtool_file *file, struct instruction *insn,
1472 struct symbol *dest, bool sibling)
1473 {
1474 insn->_call_dest = dest;
1475 if (!dest)
1476 return;
1477
1478 /*
1479 * Whatever stack impact regular CALLs have, should be undone
1480 * by the RETURN of the called function.
1481 *
1482 * Annotated intra-function calls retain the stack_ops but
1483 * are converted to JUMP, see read_intra_function_calls().
1484 */
1485 remove_insn_ops(insn);
1486
1487 annotate_call_site(file, insn, sibling);
1488 }
1489
1490 static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
1491 {
1492 /*
1493 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
1494 * so convert them accordingly.
1495 */
1496 switch (insn->type) {
1497 case INSN_CALL:
1498 insn->type = INSN_CALL_DYNAMIC;
1499 break;
1500 case INSN_JUMP_UNCONDITIONAL:
1501 insn->type = INSN_JUMP_DYNAMIC;
1502 break;
1503 case INSN_JUMP_CONDITIONAL:
1504 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
1505 break;
1506 default:
1507 return;
1508 }
1509
1510 insn->retpoline_safe = true;
1511
1512 /*
1513 * Whatever stack impact regular CALLs have, should be undone
1514 * by the RETURN of the called function.
1515 *
1516 * Annotated intra-function calls retain the stack_ops but
1517 * are converted to JUMP, see read_intra_function_calls().
1518 */
1519 remove_insn_ops(insn);
1520
1521 annotate_call_site(file, insn, false);
1522 }
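
/*
 * Example of what gets reclassified above (x86 sketch): under retpolines
 * the compiler turns
 *
 *	call *%rax
 * into
 *	call __x86_indirect_thunk_rax
 *
 * so the thunk call is treated as the dynamic call it really is, and the
 * thunk's stack side effects are discarded just like a normal CALL's.
 */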
1523
1524 static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
1525 {
1526 /*
1527 * Return thunk tail calls are really just returns in disguise,
1528 * so convert them accordingly.
1529 */
1530 insn->type = INSN_RETURN;
1531 insn->retpoline_safe = true;
1532
1533 if (add)
1534 list_add_tail(&insn->call_node, &file->return_thunk_list);
1535 }
1536
1537 static bool is_first_func_insn(struct objtool_file *file,
1538 struct instruction *insn, struct symbol *sym)
1539 {
1540 if (insn->offset == sym->offset)
1541 return true;
1542
1543 /* Allow direct CALL/JMP past ENDBR */
1544 if (opts.ibt) {
1545 struct instruction *prev = prev_insn_same_sym(file, insn);
1546
1547 if (prev && prev->type == INSN_ENDBR &&
1548 insn->offset == sym->offset + prev->len)
1549 return true;
1550 }
1551
1552 return false;
1553 }
1554
1555 /*
1556 * A sibling call is a tail-call to another symbol -- to differentiate from a
1557 * recursive tail-call which is to the same symbol.
1558 */
1559 static bool jump_is_sibling_call(struct objtool_file *file,
1560 struct instruction *from, struct instruction *to)
1561 {
1562 struct symbol *fs = from->sym;
1563 struct symbol *ts = to->sym;
1564
1565 /* Not a sibling call if from/to a symbol hole */
1566 if (!fs || !ts)
1567 return false;
1568
1569 /* Not a sibling call if not targeting the start of a symbol. */
1570 if (!is_first_func_insn(file, to, ts))
1571 return false;
1572
1573 /* Disallow sibling calls into STT_NOTYPE */
1574 if (ts->type == STT_NOTYPE)
1575 return false;
1576
1577 /* Must not be self to be a sibling */
1578 return fs->pfunc != ts->pfunc;
1579 }
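
/*
 * Example (sketch): with sibling-call optimization the compiler may turn
 *
 *	return other_func(x);
 *
 * into a bare "jmp other_func". That jump targets the first instruction
 * of a different STT_FUNC symbol, which is exactly what the checks above
 * match.
 */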
1580
1581 /*
1582 * Find the destination instructions for all jumps.
1583 */
1584 static int add_jump_destinations(struct objtool_file *file)
1585 {
1586 struct instruction *insn, *jump_dest;
1587 struct reloc *reloc;
1588 struct section *dest_sec;
1589 unsigned long dest_off;
1590
1591 for_each_insn(file, insn) {
1592 if (insn->jump_dest) {
1593 /*
1594 * handle_group_alt() may have previously set
1595 * 'jump_dest' for some alternatives.
1596 */
1597 continue;
1598 }
1599 if (!is_static_jump(insn))
1600 continue;
1601
1602 reloc = insn_reloc(file, insn);
1603 if (!reloc) {
1604 dest_sec = insn->sec;
1605 dest_off = arch_jump_destination(insn);
1606 } else if (reloc->sym->type == STT_SECTION) {
1607 dest_sec = reloc->sym->sec;
1608 dest_off = arch_dest_reloc_offset(reloc->addend);
1609 } else if (reloc->sym->retpoline_thunk) {
1610 add_retpoline_call(file, insn);
1611 continue;
1612 } else if (reloc->sym->return_thunk) {
1613 add_return_call(file, insn, true);
1614 continue;
1615 } else if (insn_func(insn)) {
1616 /*
1617 * External sibling call or internal sibling call with
1618 * STT_FUNC reloc.
1619 */
1620 add_call_dest(file, insn, reloc->sym, true);
1621 continue;
1622 } else if (reloc->sym->sec->idx) {
1623 dest_sec = reloc->sym->sec;
1624 dest_off = reloc->sym->sym.st_value +
1625 arch_dest_reloc_offset(reloc->addend);
1626 } else {
1627 /* non-func asm code jumping to another file */
1628 continue;
1629 }
1630
1631 jump_dest = find_insn(file, dest_sec, dest_off);
1632 if (!jump_dest) {
1633 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
1634
1635 /*
1636 * This is a special case for zen_untrain_ret().
1637 * It jumps to __x86_return_thunk(), but objtool
1638 * can't find the thunk's starting RET
1639 * instruction, because the RET is also in the
1640 * middle of another instruction. Objtool only
1641 * knows about the outer instruction.
1642 */
1643 if (sym && sym->return_thunk) {
1644 add_return_call(file, insn, false);
1645 continue;
1646 }
1647
1648 WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
1649 dest_sec->name, dest_off);
1650 return -1;
1651 }
1652
1653 /*
1654 * Cross-function jump.
1655 */
1656 if (insn_func(insn) && insn_func(jump_dest) &&
1657 insn_func(insn) != insn_func(jump_dest)) {
1658
1659 /*
1660 * For GCC 8+, create parent/child links for any cold
1661 * subfunctions. This is _mostly_ redundant with a
1662 * similar initialization in read_symbols().
1663 *
1664 * If a function has aliases, we want the *first* such
1665 * function in the symbol table to be the subfunction's
1666 * parent. In that case we overwrite the
1667 * initialization done in read_symbols().
1668 *
1669 * However this code can't completely replace the
1670 * read_symbols() code because this doesn't detect the
1671 * case where the parent function's only reference to a
1672 * subfunction is through a jump table.
1673 */
1674 if (!strstr(insn_func(insn)->name, ".cold") &&
1675 strstr(insn_func(jump_dest)->name, ".cold")) {
1676 insn_func(insn)->cfunc = insn_func(jump_dest);
1677 insn_func(jump_dest)->pfunc = insn_func(insn);
1678 }
1679 }
1680
1681 if (jump_is_sibling_call(file, insn, jump_dest)) {
1682 /*
1683 * Internal sibling call without reloc or with
1684 * STT_SECTION reloc.
1685 */
1686 add_call_dest(file, insn, insn_func(jump_dest), true);
1687 continue;
1688 }
1689
1690 insn->jump_dest = jump_dest;
1691 }
1692
1693 return 0;
1694 }
1695
1696 static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
1697 {
1698 struct symbol *call_dest;
1699
1700 call_dest = find_func_by_offset(sec, offset);
1701 if (!call_dest)
1702 call_dest = find_symbol_by_offset(sec, offset);
1703
1704 return call_dest;
1705 }
1706
1707 /*
1708 * Find the destination instructions for all calls.
1709 */
1710 static int add_call_destinations(struct objtool_file *file)
1711 {
1712 struct instruction *insn;
1713 unsigned long dest_off;
1714 struct symbol *dest;
1715 struct reloc *reloc;
1716
1717 for_each_insn(file, insn) {
1718 if (insn->type != INSN_CALL)
1719 continue;
1720
1721 reloc = insn_reloc(file, insn);
1722 if (!reloc) {
1723 dest_off = arch_jump_destination(insn);
1724 dest = find_call_destination(insn->sec, dest_off);
1725
1726 add_call_dest(file, insn, dest, false);
1727
1728 if (insn->ignore)
1729 continue;
1730
1731 if (!insn_call_dest(insn)) {
1732 WARN_INSN(insn, "unannotated intra-function call");
1733 return -1;
1734 }
1735
1736 if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
1737 WARN_INSN(insn, "unsupported call to non-function");
1738 return -1;
1739 }
1740
1741 } else if (reloc->sym->type == STT_SECTION) {
1742 dest_off = arch_dest_reloc_offset(reloc->addend);
1743 dest = find_call_destination(reloc->sym->sec, dest_off);
1744 if (!dest) {
1745 WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
1746 reloc->sym->sec->name, dest_off);
1747 return -1;
1748 }
1749
1750 add_call_dest(file, insn, dest, false);
1751
1752 } else if (reloc->sym->retpoline_thunk) {
1753 add_retpoline_call(file, insn);
1754
1755 } else
1756 add_call_dest(file, insn, reloc->sym, false);
1757 }
1758
1759 return 0;
1760 }
1761
1762 /*
1763 * The .alternatives section requires some extra special care over and above
1764 * other special sections because alternatives are patched in place.
1765 */
1766 static int handle_group_alt(struct objtool_file *file,
1767 struct special_alt *special_alt,
1768 struct instruction *orig_insn,
1769 struct instruction **new_insn)
1770 {
1771 struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
1772 struct alt_group *orig_alt_group, *new_alt_group;
1773 unsigned long dest_off;
1774
1775 orig_alt_group = orig_insn->alt_group;
1776 if (!orig_alt_group) {
1777 struct instruction *last_orig_insn = NULL;
1778
1779 orig_alt_group = malloc(sizeof(*orig_alt_group));
1780 if (!orig_alt_group) {
1781 WARN("malloc failed");
1782 return -1;
1783 }
1784 orig_alt_group->cfi = calloc(special_alt->orig_len,
1785 sizeof(struct cfi_state *));
1786 if (!orig_alt_group->cfi) {
1787 WARN("calloc failed");
1788 return -1;
1789 }
1790
1791 insn = orig_insn;
1792 sec_for_each_insn_from(file, insn) {
1793 if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
1794 break;
1795
1796 insn->alt_group = orig_alt_group;
1797 last_orig_insn = insn;
1798 }
1799 orig_alt_group->orig_group = NULL;
1800 orig_alt_group->first_insn = orig_insn;
1801 orig_alt_group->last_insn = last_orig_insn;
1802 orig_alt_group->nop = NULL;
1803 } else {
1804 if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
1805 orig_alt_group->first_insn->offset != special_alt->orig_len) {
1806 WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
1807 orig_alt_group->last_insn->offset +
1808 orig_alt_group->last_insn->len -
1809 orig_alt_group->first_insn->offset,
1810 special_alt->orig_len);
1811 return -1;
1812 }
1813 }
1814
1815 new_alt_group = malloc(sizeof(*new_alt_group));
1816 if (!new_alt_group) {
1817 WARN("malloc failed");
1818 return -1;
1819 }
1820
1821 if (special_alt->new_len < special_alt->orig_len) {
1822 /*
1823 * Insert a fake nop at the end to make the replacement
1824 * alt_group the same size as the original. This is needed to
1825 * allow propagate_alt_cfi() to do its magic. When the last
1826 * instruction affects the stack, the instruction after it (the
1827 * nop) will propagate the new state to the shared CFI array.
1828 */
1829 nop = malloc(sizeof(*nop));
1830 if (!nop) {
1831 WARN("malloc failed");
1832 return -1;
1833 }
1834 memset(nop, 0, sizeof(*nop));
1835
1836 nop->sec = special_alt->new_sec;
1837 nop->offset = special_alt->new_off + special_alt->new_len;
1838 nop->len = special_alt->orig_len - special_alt->new_len;
1839 nop->type = INSN_NOP;
1840 nop->sym = orig_insn->sym;
1841 nop->alt_group = new_alt_group;
1842 nop->ignore = orig_insn->ignore_alts;
1843 }
1844
1845 if (!special_alt->new_len) {
1846 *new_insn = nop;
1847 goto end;
1848 }
1849
1850 insn = *new_insn;
1851 sec_for_each_insn_from(file, insn) {
1852 struct reloc *alt_reloc;
1853
1854 if (insn->offset >= special_alt->new_off + special_alt->new_len)
1855 break;
1856
1857 last_new_insn = insn;
1858
1859 insn->ignore = orig_insn->ignore_alts;
1860 insn->sym = orig_insn->sym;
1861 insn->alt_group = new_alt_group;
1862
1863 /*
1864 * Since alternative replacement code is copy/pasted by the
1865 * kernel after applying relocations, generally such code can't
1866 * have relative-address relocation references to outside the
1867 * .altinstr_replacement section, unless the arch's
1868 * alternatives code can adjust the relative offsets
1869 * accordingly.
1870 */
1871 alt_reloc = insn_reloc(file, insn);
1872 if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
1873 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
1874
1875 WARN_INSN(insn, "unsupported relocation in alternatives section");
1876 return -1;
1877 }
1878
1879 if (!is_static_jump(insn))
1880 continue;
1881
1882 if (!insn->immediate)
1883 continue;
1884
1885 dest_off = arch_jump_destination(insn);
1886 if (dest_off == special_alt->new_off + special_alt->new_len) {
1887 insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
1888 if (!insn->jump_dest) {
1889 WARN_INSN(insn, "can't find alternative jump destination");
1890 return -1;
1891 }
1892 }
1893 }
1894
1895 if (!last_new_insn) {
1896 WARN_FUNC("can't find last new alternative instruction",
1897 special_alt->new_sec, special_alt->new_off);
1898 return -1;
1899 }
1900
1901 end:
1902 new_alt_group->orig_group = orig_alt_group;
1903 new_alt_group->first_insn = *new_insn;
1904 new_alt_group->last_insn = last_new_insn;
1905 new_alt_group->nop = nop;
1906 new_alt_group->cfi = orig_alt_group->cfi;
1907 return 0;
1908 }
1909
1910 /*
1911 * A jump label entry can either convert a nop to a jump or a jump to a nop.
1912 * If the original instruction is a jump, make the alt entry an effective nop
1913 * by just skipping the original instruction.
1914 */
1915 static int handle_jump_alt(struct objtool_file *file,
1916 struct special_alt *special_alt,
1917 struct instruction *orig_insn,
1918 struct instruction **new_insn)
1919 {
1920 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
1921 orig_insn->type != INSN_NOP) {
1922
1923 WARN_INSN(orig_insn, "unsupported instruction at jump label");
1924 return -1;
1925 }
1926
1927 if (opts.hack_jump_label && special_alt->key_addend & 2) {
1928 struct reloc *reloc = insn_reloc(file, orig_insn);
1929
1930 if (reloc) {
1931 reloc->type = R_NONE;
1932 elf_write_reloc(file->elf, reloc);
1933 }
1934 elf_write_insn(file->elf, orig_insn->sec,
1935 orig_insn->offset, orig_insn->len,
1936 arch_nop_insn(orig_insn->len));
1937 orig_insn->type = INSN_NOP;
1938 }
1939
1940 if (orig_insn->type == INSN_NOP) {
1941 if (orig_insn->len == 2)
1942 file->jl_nop_short++;
1943 else
1944 file->jl_nop_long++;
1945
1946 return 0;
1947 }
1948
1949 if (orig_insn->len == 2)
1950 file->jl_short++;
1951 else
1952 file->jl_long++;
1953
1954 *new_insn = next_insn_same_sec(file, orig_insn);
1955 return 0;
1956 }
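
/*
 * Context (sketch): a jump label site is a NOP or JMP recorded in
 * __jump_table, and its "alternative" is simply the other form. When the
 * original is a JMP, the effective nop is modeled above by pointing
 * *new_insn at the instruction following the jump.
 */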
1957
1958 /*
1959 * Read all the special sections which have alternate instructions which can be
1960 * patched in or redirected to at runtime. Each instruction having alternate
1961 * instruction(s) has them added to its insn->alts list, which will be
1962 * traversed in validate_branch().
1963 */
1964 static int add_special_section_alts(struct objtool_file *file)
1965 {
1966 struct list_head special_alts;
1967 struct instruction *orig_insn, *new_insn;
1968 struct special_alt *special_alt, *tmp;
1969 struct alternative *alt;
1970 int ret;
1971
1972 ret = special_get_alts(file->elf, &special_alts);
1973 if (ret)
1974 return ret;
1975
1976 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {
1977
1978 orig_insn = find_insn(file, special_alt->orig_sec,
1979 special_alt->orig_off);
1980 if (!orig_insn) {
1981 WARN_FUNC("special: can't find orig instruction",
1982 special_alt->orig_sec, special_alt->orig_off);
1983 ret = -1;
1984 goto out;
1985 }
1986
1987 new_insn = NULL;
1988 if (!special_alt->group || special_alt->new_len) {
1989 new_insn = find_insn(file, special_alt->new_sec,
1990 special_alt->new_off);
1991 if (!new_insn) {
1992 WARN_FUNC("special: can't find new instruction",
1993 special_alt->new_sec,
1994 special_alt->new_off);
1995 ret = -1;
1996 goto out;
1997 }
1998 }
1999
2000 if (special_alt->group) {
2001 if (!special_alt->orig_len) {
2002 WARN_INSN(orig_insn, "empty alternative entry");
2003 continue;
2004 }
2005
2006 ret = handle_group_alt(file, special_alt, orig_insn,
2007 &new_insn);
2008 if (ret)
2009 goto out;
2010 } else if (special_alt->jump_or_nop) {
2011 ret = handle_jump_alt(file, special_alt, orig_insn,
2012 &new_insn);
2013 if (ret)
2014 goto out;
2015 }
2016
2017 alt = malloc(sizeof(*alt));
2018 if (!alt) {
2019 WARN("malloc failed");
2020 ret = -1;
2021 goto out;
2022 }
2023
2024 alt->insn = new_insn;
2025 alt->skip_orig = special_alt->skip_orig;
2026 orig_insn->ignore_alts |= special_alt->skip_alt;
2027 alt->next = orig_insn->alts;
2028 orig_insn->alts = alt;
2029
2030 list_del(&special_alt->list);
2031 free(special_alt);
2032 }
2033
2034 if (opts.stats) {
2035 printf("jl\\\tNOP\tJMP\n");
2036 printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
2037 printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
2038 }
2039
2040 out:
2041 return ret;
2042 }
2043
2044 static int add_jump_table(struct objtool_file *file, struct instruction *insn,
2045 struct reloc *table)
2046 {
2047 struct reloc *reloc = table;
2048 struct instruction *dest_insn;
2049 struct alternative *alt;
2050 struct symbol *pfunc = insn_func(insn)->pfunc;
2051 unsigned int prev_offset = 0;
2052
2053 /*
2054 * Each @reloc is a switch table relocation which points to the target
2055 * instruction.
2056 */
2057 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {
2058
2059 /* Check for the end of the table: */
2060 if (reloc != table && reloc->jump_table_start)
2061 break;
2062
2063 /* Make sure the table entries are consecutive: */
2064 if (prev_offset && reloc->offset != prev_offset + 8)
2065 break;
2066
2067 /* Detect function pointers from contiguous objects: */
2068 if (reloc->sym->sec == pfunc->sec &&
2069 reloc->addend == pfunc->offset)
2070 break;
2071
2072 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
2073 if (!dest_insn)
2074 break;
2075
2076 /* Make sure the destination is in the same function: */
2077 if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
2078 break;
2079
2080 alt = malloc(sizeof(*alt));
2081 if (!alt) {
2082 WARN("malloc failed");
2083 return -1;
2084 }
2085
2086 alt->insn = dest_insn;
2087 alt->next = insn->alts;
2088 insn->alts = alt;
2089 prev_offset = reloc->offset;
2090 }
2091
2092 if (!prev_offset) {
2093 WARN_INSN(insn, "can't find switch jump table");
2094 return -1;
2095 }
2096
2097 return 0;
2098 }
2099
2100 /*
2101 * find_jump_table() - Given a dynamic jump, find the switch jump table
2102 * associated with it.
2103 */
2104 static struct reloc *find_jump_table(struct objtool_file *file,
2105 struct symbol *func,
2106 struct instruction *insn)
2107 {
2108 struct reloc *table_reloc;
2109 struct instruction *dest_insn, *orig_insn = insn;
2110
2111 /*
2112 * Backward search using the @first_jump_src links; these help avoid
2113 * much of the 'in between' code, which could otherwise confuse the
2114 * search.
2115 */
2116 for (;
2117 insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2118 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2119
2120 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2121 break;
2122
2123 /* allow small jumps within the range */
2124 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2125 insn->jump_dest &&
2126 (insn->jump_dest->offset <= insn->offset ||
2127 insn->jump_dest->offset > orig_insn->offset))
2128 break;
2129
2130 table_reloc = arch_find_switch_table(file, insn);
2131 if (!table_reloc)
2132 continue;
2133 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
2134 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2135 continue;
2136
2137 return table_reloc;
2138 }
2139
2140 return NULL;
2141 }
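
/*
 * Illustrative x86-64 pattern being searched for (hypothetical labels):
 *
 *	cmp	$0x3,%eax
 *	ja	.Ldefault
 *	mov	.Ltable(,%rax,8),%rax	# reloc into .rodata
 *	jmp	*%rax			# INSN_JUMP_DYNAMIC
 *
 * arch_find_switch_table() recognizes the .rodata relocation; the
 * backward walk above covers codegen where the table load is separated
 * from the eventual indirect jump.
 */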
2142
2143 /*
2144 * First pass: Mark the head of each jump table so that in the next pass,
2145 * we know when a given jump table ends and the next one starts.
2146 */
2147 static void mark_func_jump_tables(struct objtool_file *file,
2148 struct symbol *func)
2149 {
2150 struct instruction *insn, *last = NULL;
2151 struct reloc *reloc;
2152
2153 func_for_each_insn(file, func, insn) {
2154 if (!last)
2155 last = insn;
2156
2157 /*
2158 * Store back-pointers for unconditional forward jumps such
2159 * that find_jump_table() can back-track using those and
2160 * avoid some potentially confusing code.
2161 */
2162 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2163 insn->offset > last->offset &&
2164 insn->jump_dest->offset > insn->offset &&
2165 !insn->jump_dest->first_jump_src) {
2166
2167 insn->jump_dest->first_jump_src = insn;
2168 last = insn->jump_dest;
2169 }
2170
2171 if (insn->type != INSN_JUMP_DYNAMIC)
2172 continue;
2173
2174 reloc = find_jump_table(file, func, insn);
2175 if (reloc) {
2176 reloc->jump_table_start = true;
2177 insn->_jump_table = reloc;
2178 }
2179 }
2180 }
2181
2182 static int add_func_jump_tables(struct objtool_file *file,
2183 struct symbol *func)
2184 {
2185 struct instruction *insn;
2186 int ret;
2187
2188 func_for_each_insn(file, func, insn) {
2189 if (!insn_jump_table(insn))
2190 continue;
2191
2192 ret = add_jump_table(file, insn, insn_jump_table(insn));
2193 if (ret)
2194 return ret;
2195 }
2196
2197 return 0;
2198 }
2199
2200 /*
2201 * For some switch statements, gcc generates a jump table in the .rodata
2202 * section which contains a list of addresses within the function to jump to.
2203 * This finds these jump tables and adds them to the insn->alts lists.
2204 */
2205 static int add_jump_table_alts(struct objtool_file *file)
2206 {
2207 struct symbol *func;
2208 int ret;
2209
2210 if (!file->rodata)
2211 return 0;
2212
2213 for_each_sym(file, func) {
2214 if (func->type != STT_FUNC)
2215 continue;
2216
2217 mark_func_jump_tables(file, func);
2218 ret = add_func_jump_tables(file, func);
2219 if (ret)
2220 return ret;
2221 }
2222
2223 return 0;
2224 }
2225
2226 static void set_func_state(struct cfi_state *state)
2227 {
2228 state->cfa = initial_func_cfi.cfa;
2229 memcpy(&state->regs, &initial_func_cfi.regs,
2230 CFI_NUM_REGS * sizeof(struct cfi_reg));
2231 state->stack_size = initial_func_cfi.cfa.offset;
2232 state->type = UNWIND_HINT_TYPE_CALL;
2233 }
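
/*
 * Worked example of the initial state (x86-64 values): at function
 * entry, CALL has just pushed the return address, so initial_func_cfi
 * describes
 *
 *	cfa = { base: CFI_SP, offset: 8 }		// CFA = %rsp + 8
 *	regs[CFI_RA] = { base: CFI_CFA, offset: -8 }	// return address
 *
 * and stack_size therefore starts at 8. Every push/pop from here on is
 * tracked against this baseline by update_cfi_state().
 */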
2234
2235 static int read_unwind_hints(struct objtool_file *file)
2236 {
2237 struct cfi_state cfi = init_cfi;
2238 struct section *sec, *relocsec;
2239 struct unwind_hint *hint;
2240 struct instruction *insn;
2241 struct reloc *reloc;
2242 int i;
2243
2244 sec = find_section_by_name(file->elf, ".discard.unwind_hints");
2245 if (!sec)
2246 return 0;
2247
2248 relocsec = sec->reloc;
2249 if (!relocsec) {
2250 WARN("missing .rela.discard.unwind_hints section");
2251 return -1;
2252 }
2253
2254 if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
2255 WARN("struct unwind_hint size mismatch");
2256 return -1;
2257 }
2258
2259 file->hints = true;
2260
2261 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
2262 hint = (struct unwind_hint *)sec->data->d_buf + i;
2263
2264 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
2265 if (!reloc) {
2266 WARN("can't find reloc for unwind_hints[%d]", i);
2267 return -1;
2268 }
2269
2270 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2271 if (!insn) {
2272 WARN("can't find insn for unwind_hints[%d]", i);
2273 return -1;
2274 }
2275
2276 insn->hint = true;
2277
2278 if (hint->type == UNWIND_HINT_TYPE_SAVE) {
2279 insn->hint = false;
2280 insn->save = true;
2281 continue;
2282 }
2283
2284 if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
2285 insn->restore = true;
2286 continue;
2287 }
2288
2289 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
2290 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
2291
2292 if (sym && sym->bind == STB_GLOBAL) {
2293 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
2294 WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
2295 }
2296 }
2297 }
2298
2299 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
2300 insn->cfi = &func_cfi;
2301 continue;
2302 }
2303
2304 if (insn->cfi)
2305 cfi = *(insn->cfi);
2306
2307 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
2308 WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
2309 return -1;
2310 }
2311
2312 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
2313 cfi.type = hint->type;
2314 cfi.signal = hint->signal;
2315
2316 insn->cfi = cfi_hash_find_or_add(&cfi);
2317 }
2318
2319 return 0;
2320 }
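
/*
 * A sketch of where these hints originate (simplified; see the arch
 * unwind hint macros for the canonical forms):
 *
 *	UNWIND_HINT_SAVE	// -> UNWIND_HINT_TYPE_SAVE, insn->save
 *	...
 *	UNWIND_HINT_RESTORE	// -> UNWIND_HINT_TYPE_RESTORE, insn->restore
 *
 * Each macro emits a struct unwind_hint into .discard.unwind_hints plus
 * a relocation to the annotated instruction, which the loop above
 * resolves back into insn->cfi and the flags checked here.
 */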
2321
2322 static int read_noendbr_hints(struct objtool_file *file)
2323 {
2324 struct section *sec;
2325 struct instruction *insn;
2326 struct reloc *reloc;
2327
2328 sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2329 if (!sec)
2330 return 0;
2331
2332 list_for_each_entry(reloc, &sec->reloc_list, list) {
2333 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2334 if (!insn) {
2335 WARN("bad .discard.noendbr entry");
2336 return -1;
2337 }
2338
2339 insn->noendbr = 1;
2340 }
2341
2342 return 0;
2343 }
2344
2345 static int read_retpoline_hints(struct objtool_file *file)
2346 {
2347 struct section *sec;
2348 struct instruction *insn;
2349 struct reloc *reloc;
2350
2351 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2352 if (!sec)
2353 return 0;
2354
2355 list_for_each_entry(reloc, &sec->reloc_list, list) {
2356 if (reloc->sym->type != STT_SECTION) {
2357 WARN("unexpected relocation symbol type in %s", sec->name);
2358 return -1;
2359 }
2360
2361 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2362 if (!insn) {
2363 WARN("bad .discard.retpoline_safe entry");
2364 return -1;
2365 }
2366
2367 if (insn->type != INSN_JUMP_DYNAMIC &&
2368 insn->type != INSN_CALL_DYNAMIC &&
2369 insn->type != INSN_RETURN &&
2370 insn->type != INSN_NOP) {
2371 WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2372 return -1;
2373 }
2374
2375 insn->retpoline_safe = true;
2376 }
2377
2378 return 0;
2379 }
2380
2381 static int read_instr_hints(struct objtool_file *file)
2382 {
2383 struct section *sec;
2384 struct instruction *insn;
2385 struct reloc *reloc;
2386
2387 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2388 if (!sec)
2389 return 0;
2390
2391 list_for_each_entry(reloc, &sec->reloc_list, list) {
2392 if (reloc->sym->type != STT_SECTION) {
2393 WARN("unexpected relocation symbol type in %s", sec->name);
2394 return -1;
2395 }
2396
2397 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2398 if (!insn) {
2399 WARN("bad .discard.instr_end entry");
2400 return -1;
2401 }
2402
2403 insn->instr--;
2404 }
2405
2406 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2407 if (!sec)
2408 return 0;
2409
2410 list_for_each_entry(reloc, &sec->reloc_list, list) {
2411 if (reloc->sym->type != STT_SECTION) {
2412 WARN("unexpected relocation symbol type in %s", sec->name);
2413 return -1;
2414 }
2415
2416 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2417 if (!insn) {
2418 WARN("bad .discard.instr_begin entry");
2419 return -1;
2420 }
2421
2422 insn->instr++;
2423 }
2424
2425 return 0;
2426 }
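
/*
 * The insn->instr counter adjusted above works as a nesting depth.
 * Roughly (see instrumentation_begin()/end() for the real macros):
 *
 *	instrumentation_begin();  // .discard.instr_begin -> insn->instr++
 *	trace_hardirqs_off();     // instrumented call allowed here
 *	instrumentation_end();    // .discard.instr_end   -> insn->instr--
 *
 * During validation, state.instr > 0 then permits instrumentation even
 * while inside a noinstr section.
 */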
2427
2428 static int read_validate_unret_hints(struct objtool_file *file)
2429 {
2430 struct section *sec;
2431 struct instruction *insn;
2432 struct reloc *reloc;
2433
2434 sec = find_section_by_name(file->elf, ".rela.discard.validate_unret");
2435 if (!sec)
2436 return 0;
2437
2438 list_for_each_entry(reloc, &sec->reloc_list, list) {
2439 if (reloc->sym->type != STT_SECTION) {
2440 WARN("unexpected relocation symbol type in %s", sec->name);
2441 return -1;
2442 }
2443
2444 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2445 if (!insn) {
2446 WARN("bad .discard.validate_unret entry");
2447 return -1;
2448 }
2449 insn->unret = 1;
2450 }
2451
2452 return 0;
2453 }
2454
2455
2456 static int read_intra_function_calls(struct objtool_file *file)
2457 {
2458 struct instruction *insn;
2459 struct section *sec;
2460 struct reloc *reloc;
2461
2462 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2463 if (!sec)
2464 return 0;
2465
2466 list_for_each_entry(reloc, &sec->reloc_list, list) {
2467 unsigned long dest_off;
2468
2469 if (reloc->sym->type != STT_SECTION) {
2470 WARN("unexpected relocation symbol type in %s",
2471 sec->name);
2472 return -1;
2473 }
2474
2475 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2476 if (!insn) {
2477 WARN("bad .discard.intra_function_call entry");
2478 return -1;
2479 }
2480
2481 if (insn->type != INSN_CALL) {
2482 WARN_INSN(insn, "intra_function_call not a direct call");
2483 return -1;
2484 }
2485
2486 /*
2487 * Treat intra-function CALLs as JMPs, but with a stack_op.
2488 * See add_call_destinations(), which strips stack_ops from
2489 * normal CALLs.
2490 */
2491 insn->type = INSN_JUMP_UNCONDITIONAL;
2492
2493 dest_off = arch_jump_destination(insn);
2494 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2495 if (!insn->jump_dest) {
2496 WARN_INSN(insn, "can't find call dest at %s+0x%lx",
2497 insn->sec->name, dest_off);
2498 return -1;
2499 }
2500 }
2501
2502 return 0;
2503 }
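
/*
 * Sketch of the annotation consumed above (simplified; see
 * ANNOTATE_INTRA_FUNCTION_CALL for the canonical form):
 *
 *	ANNOTATE_INTRA_FUNCTION_CALL
 *	call	1f		// pushes the return address...
 *	1:	pop	%rax	// ...which the code then consumes
 *
 * The annotation lands in .discard.intra_function_calls, allowing the
 * CALL to be retyped as a jump while keeping its push stack_op.
 */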
2504
2505 /*
2506 * Return true if name matches an instrumentation function, where calls to that
2507 * function from noinstr code can safely be removed, but compilers won't do so.
2508 */
2509 static bool is_profiling_func(const char *name)
2510 {
2511 /*
2512 * Many compilers cannot disable KCOV with a function attribute.
2513 */
2514 if (!strncmp(name, "__sanitizer_cov_", 16))
2515 return true;
2516
2517 /*
2518 * Some compilers currently do not remove __tsan_func_entry/exit nor
2519 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
2520 * the __no_sanitize_thread attribute, so remove them here. Once the
2521 * kernel's minimum Clang version is 14.0, this can be removed.
2522 */
2523 if (!strncmp(name, "__tsan_func_", 12) ||
2524 !strcmp(name, "__tsan_atomic_signal_fence"))
2525 return true;
2526
2527 return false;
2528 }
2529
2530 static int classify_symbols(struct objtool_file *file)
2531 {
2532 struct symbol *func;
2533
2534 for_each_sym(file, func) {
2535 if (func->bind != STB_GLOBAL)
2536 continue;
2537
2538 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2539 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2540 func->static_call_tramp = true;
2541
2542 if (arch_is_retpoline(func))
2543 func->retpoline_thunk = true;
2544
2545 if (arch_is_rethunk(func))
2546 func->return_thunk = true;
2547
2548 if (arch_ftrace_match(func->name))
2549 func->fentry = true;
2550
2551 if (is_profiling_func(func->name))
2552 func->profiling_func = true;
2553 }
2554
2555 return 0;
2556 }
2557
2558 static void mark_rodata(struct objtool_file *file)
2559 {
2560 struct section *sec;
2561 bool found = false;
2562
2563 /*
2564 * Search for the following rodata sections, each of which can
2565 * potentially contain jump tables:
2566 *
2567 * - .rodata: can contain GCC switch tables
2568 * - .rodata.<func>: same, if -fdata-sections is being used
2569 * - .rodata..c_jump_table: contains C annotated jump tables
2570 *
2571 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2572 */
2573 for_each_sec(file, sec) {
2574 if (!strncmp(sec->name, ".rodata", 7) &&
2575 !strstr(sec->name, ".str1.")) {
2576 sec->rodata = true;
2577 found = true;
2578 }
2579 }
2580
2581 file->rodata = found;
2582 }
2583
2584 static int decode_sections(struct objtool_file *file)
2585 {
2586 int ret;
2587
2588 mark_rodata(file);
2589
2590 ret = init_pv_ops(file);
2591 if (ret)
2592 return ret;
2593
2594 /*
2595 * Must be before add_jump_destinations() and add_call_destinations().
2596 */
2597 ret = classify_symbols(file);
2598 if (ret)
2599 return ret;
2600
2601 ret = decode_instructions(file);
2602 if (ret)
2603 return ret;
2604
2605 add_ignores(file);
2606 add_uaccess_safe(file);
2607
2608 ret = add_ignore_alternatives(file);
2609 if (ret)
2610 return ret;
2611
2612 /*
2613 * Must be before read_unwind_hints() since that needs insn->noendbr.
2614 */
2615 ret = read_noendbr_hints(file);
2616 if (ret)
2617 return ret;
2618
2619 /*
2620 * Must be before add_jump_destinations(), which depends on 'func'
2621 * being set for alternatives, to enable proper sibling call detection.
2622 */
2623 if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
2624 ret = add_special_section_alts(file);
2625 if (ret)
2626 return ret;
2627 }
2628
2629 ret = add_jump_destinations(file);
2630 if (ret)
2631 return ret;
2632
2633 /*
2634 * Must be before add_call_destinations(); it changes INSN_CALL to
2635 * INSN_JUMP_UNCONDITIONAL.
2636 */
2637 ret = read_intra_function_calls(file);
2638 if (ret)
2639 return ret;
2640
2641 ret = add_call_destinations(file);
2642 if (ret)
2643 return ret;
2644
2645 /*
2646 * Must be after add_call_destinations() such that it can override
2647 * dead_end_function() marks.
2648 */
2649 ret = add_dead_ends(file);
2650 if (ret)
2651 return ret;
2652
2653 ret = add_jump_table_alts(file);
2654 if (ret)
2655 return ret;
2656
2657 ret = read_unwind_hints(file);
2658 if (ret)
2659 return ret;
2660
2661 ret = read_retpoline_hints(file);
2662 if (ret)
2663 return ret;
2664
2665 ret = read_instr_hints(file);
2666 if (ret)
2667 return ret;
2668
2669 ret = read_validate_unret_hints(file);
2670 if (ret)
2671 return ret;
2672
2673 return 0;
2674 }
2675
2676 static bool is_fentry_call(struct instruction *insn)
2677 {
2678 if (insn->type == INSN_CALL &&
2679 insn_call_dest(insn) &&
2680 insn_call_dest(insn)->fentry)
2681 return true;
2682
2683 return false;
2684 }
2685
2686 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2687 {
2688 struct cfi_state *cfi = &state->cfi;
2689 int i;
2690
2691 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2692 return true;
2693
2694 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2695 return true;
2696
2697 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2698 return true;
2699
2700 for (i = 0; i < CFI_NUM_REGS; i++) {
2701 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2702 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2703 return true;
2704 }
2705
2706 return false;
2707 }
2708
2709 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2710 int expected_offset)
2711 {
2712 return reg->base == CFI_CFA &&
2713 reg->offset == expected_offset;
2714 }
2715
2716 static bool has_valid_stack_frame(struct insn_state *state)
2717 {
2718 struct cfi_state *cfi = &state->cfi;
2719
2720 if (cfi->cfa.base == CFI_BP &&
2721 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2722 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2723 return true;
2724
2725 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2726 return true;
2727
2728 return false;
2729 }
2730
2731 static int update_cfi_state_regs(struct instruction *insn,
2732 struct cfi_state *cfi,
2733 struct stack_op *op)
2734 {
2735 struct cfi_reg *cfa = &cfi->cfa;
2736
2737 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2738 return 0;
2739
2740 /* push */
2741 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2742 cfa->offset += 8;
2743
2744 /* pop */
2745 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2746 cfa->offset -= 8;
2747
2748 /* add immediate to sp */
2749 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2750 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2751 cfa->offset -= op->src.offset;
2752
2753 return 0;
2754 }
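
/*
 * Worked example of the offset arithmetic above, starting from
 * cfa.offset == 8 at function entry:
 *
 *	push %rbp		// cfa.offset  8 -> 16
 *	sub  $0x10,%rsp		// OP_SRC_ADD, src.offset == -16:
 *				//    cfa.offset 16 -> 32
 *	add  $0x10,%rsp		// cfa.offset 32 -> 16
 *	pop  %rbp		// cfa.offset 16 ->  8
 *
 * i.e. the CFA keeps naming the same stack slot throughout.
 */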
2755
2756 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2757 {
2758 if (arch_callee_saved_reg(reg) &&
2759 cfi->regs[reg].base == CFI_UNDEFINED) {
2760 cfi->regs[reg].base = base;
2761 cfi->regs[reg].offset = offset;
2762 }
2763 }
2764
2765 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2766 {
2767 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2768 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2769 }
2770
2771 /*
2772 * A note about DRAP stack alignment:
2773 *
2774 * GCC has the concept of a DRAP register, which is used to help keep track of
2775 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2776 * register. The typical DRAP pattern is:
2777 *
2778 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2779 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2780 * 41 ff 72 f8 pushq -0x8(%r10)
2781 * 55 push %rbp
2782 * 48 89 e5 mov %rsp,%rbp
2783 * (more pushes)
2784 * 41 52 push %r10
2785 * ...
2786 * 41 5a pop %r10
2787 * (more pops)
2788 * 5d pop %rbp
2789 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2790 * c3 retq
2791 *
2792 * There are some variations in the epilogues, like:
2793 *
2794 * 5b pop %rbx
2795 * 41 5a pop %r10
2796 * 41 5c pop %r12
2797 * 41 5d pop %r13
2798 * 41 5e pop %r14
2799 * c9 leaveq
2800 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2801 * c3 retq
2802 *
2803 * and:
2804 *
2805 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2806 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2807 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2808 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2809 * c9 leaveq
2810 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2811 * c3 retq
2812 *
2813 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2814 * restored beforehand:
2815 *
2816 * 41 55 push %r13
2817 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2818 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2819 * ...
2820 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2821 * 41 5d pop %r13
2822 * c3 retq
2823 */
2824 static int update_cfi_state(struct instruction *insn,
2825 struct instruction *next_insn,
2826 struct cfi_state *cfi, struct stack_op *op)
2827 {
2828 struct cfi_reg *cfa = &cfi->cfa;
2829 struct cfi_reg *regs = cfi->regs;
2830
2831 /* stack operations don't make sense with an undefined CFA */
2832 if (cfa->base == CFI_UNDEFINED) {
2833 if (insn_func(insn)) {
2834 WARN_INSN(insn, "undefined stack state");
2835 return -1;
2836 }
2837 return 0;
2838 }
2839
2840 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2841 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2842 return update_cfi_state_regs(insn, cfi, op);
2843
2844 switch (op->dest.type) {
2845
2846 case OP_DEST_REG:
2847 switch (op->src.type) {
2848
2849 case OP_SRC_REG:
2850 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2851 cfa->base == CFI_SP &&
2852 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2853
2854 /* mov %rsp, %rbp */
2855 cfa->base = op->dest.reg;
2856 cfi->bp_scratch = false;
2857 }
2858
2859 else if (op->src.reg == CFI_SP &&
2860 op->dest.reg == CFI_BP && cfi->drap) {
2861
2862 /* drap: mov %rsp, %rbp */
2863 regs[CFI_BP].base = CFI_BP;
2864 regs[CFI_BP].offset = -cfi->stack_size;
2865 cfi->bp_scratch = false;
2866 }
2867
2868 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2869
2870 /*
2871 * mov %rsp, %reg
2872 *
2873 * This is needed for the rare case where GCC
2874 * does:
2875 *
2876 * mov %rsp, %rax
2877 * ...
2878 * mov %rax, %rsp
2879 */
2880 cfi->vals[op->dest.reg].base = CFI_CFA;
2881 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2882 }
2883
2884 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2885 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2886
2887 /*
2888 * mov %rbp, %rsp
2889 *
2890 * Restore the original stack pointer (Clang).
2891 */
2892 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2893 }
2894
2895 else if (op->dest.reg == cfa->base) {
2896
2897 /* mov %reg, %rsp */
2898 if (cfa->base == CFI_SP &&
2899 cfi->vals[op->src.reg].base == CFI_CFA) {
2900
2901 /*
2902 * This is needed for the rare case
2903 * where GCC does something dumb like:
2904 *
2905 * lea 0x8(%rsp), %rcx
2906 * ...
2907 * mov %rcx, %rsp
2908 */
2909 cfa->offset = -cfi->vals[op->src.reg].offset;
2910 cfi->stack_size = cfa->offset;
2911
2912 } else if (cfa->base == CFI_SP &&
2913 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2914 cfi->vals[op->src.reg].offset == cfa->offset) {
2915
2916 /*
2917 * Stack swizzle:
2918 *
2919 * 1: mov %rsp, (%[tos])
2920 * 2: mov %[tos], %rsp
2921 * ...
2922 * 3: pop %rsp
2923 *
2924 * Where:
2925 *
2926 * 1 - places a pointer to the previous
2927 * stack at the Top-of-Stack of the
2928 * new stack.
2929 *
2930 * 2 - switches to the new stack.
2931 *
2932 * 3 - pops the Top-of-Stack to restore
2933 * the original stack.
2934 *
2935 * Note: we set base to SP_INDIRECT
2936 * here and preserve offset. Therefore
2937 * when the unwinder reaches ToS it
2938 * will dereference SP and then add the
2939 * offset to find the next frame, IOW:
2940 * (%rsp) + offset.
2941 */
2942 cfa->base = CFI_SP_INDIRECT;
2943
2944 } else {
2945 cfa->base = CFI_UNDEFINED;
2946 cfa->offset = 0;
2947 }
2948 }
2949
2950 else if (op->dest.reg == CFI_SP &&
2951 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2952 cfi->vals[op->src.reg].offset == cfa->offset) {
2953
2954 /*
2955 * The same stack swizzle case 2) as above. But
2956 * because we can't change cfa->base, case 3)
2957 * will become a regular POP. Pretend we're a
2958 * PUSH so things don't go unbalanced.
2959 */
2960 cfi->stack_size += 8;
2961 }
2962
2963
2964 break;
2965
2966 case OP_SRC_ADD:
2967 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2968
2969 /* add imm, %rsp */
2970 cfi->stack_size -= op->src.offset;
2971 if (cfa->base == CFI_SP)
2972 cfa->offset -= op->src.offset;
2973 break;
2974 }
2975
2976 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2977
2978 /* lea disp(%rbp), %rsp */
2979 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2980 break;
2981 }
2982
2983 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2984
2985 /* drap: lea disp(%rsp), %drap */
2986 cfi->drap_reg = op->dest.reg;
2987
2988 /*
2989 * lea disp(%rsp), %reg
2990 *
2991 * This is needed for the rare case where GCC
2992 * does something dumb like:
2993 *
2994 * lea 0x8(%rsp), %rcx
2995 * ...
2996 * mov %rcx, %rsp
2997 */
2998 cfi->vals[op->dest.reg].base = CFI_CFA;
2999 cfi->vals[op->dest.reg].offset =
3000 -cfi->stack_size + op->src.offset;
3001
3002 break;
3003 }
3004
3005 if (cfi->drap && op->dest.reg == CFI_SP &&
3006 op->src.reg == cfi->drap_reg) {
3007
3008 /* drap: lea disp(%drap), %rsp */
3009 cfa->base = CFI_SP;
3010 cfa->offset = cfi->stack_size = -op->src.offset;
3011 cfi->drap_reg = CFI_UNDEFINED;
3012 cfi->drap = false;
3013 break;
3014 }
3015
3016 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
3017 WARN_INSN(insn, "unsupported stack register modification");
3018 return -1;
3019 }
3020
3021 break;
3022
3023 case OP_SRC_AND:
3024 if (op->dest.reg != CFI_SP ||
3025 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
3026 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
3027 WARN_INSN(insn, "unsupported stack pointer realignment");
3028 return -1;
3029 }
3030
3031 if (cfi->drap_reg != CFI_UNDEFINED) {
3032 /* drap: and imm, %rsp */
3033 cfa->base = cfi->drap_reg;
3034 cfa->offset = cfi->stack_size = 0;
3035 cfi->drap = true;
3036 }
3037
3038 /*
3039 * Older versions of GCC (4.8ish) realign the stack
3040 * without DRAP, with a frame pointer.
3041 */
3042
3043 break;
3044
3045 case OP_SRC_POP:
3046 case OP_SRC_POPF:
3047 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
3048
3049 /* pop %rsp; # restore from a stack swizzle */
3050 cfa->base = CFI_SP;
3051 break;
3052 }
3053
3054 if (!cfi->drap && op->dest.reg == cfa->base) {
3055
3056 /* pop %rbp */
3057 cfa->base = CFI_SP;
3058 }
3059
3060 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
3061 op->dest.reg == cfi->drap_reg &&
3062 cfi->drap_offset == -cfi->stack_size) {
3063
3064 /* drap: pop %drap */
3065 cfa->base = cfi->drap_reg;
3066 cfa->offset = 0;
3067 cfi->drap_offset = -1;
3068
3069 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
3070
3071 /* pop %reg */
3072 restore_reg(cfi, op->dest.reg);
3073 }
3074
3075 cfi->stack_size -= 8;
3076 if (cfa->base == CFI_SP)
3077 cfa->offset -= 8;
3078
3079 break;
3080
3081 case OP_SRC_REG_INDIRECT:
3082 if (!cfi->drap && op->dest.reg == cfa->base &&
3083 op->dest.reg == CFI_BP) {
3084
3085 /* mov disp(%rsp), %rbp */
3086 cfa->base = CFI_SP;
3087 cfa->offset = cfi->stack_size;
3088 }
3089
3090 if (cfi->drap && op->src.reg == CFI_BP &&
3091 op->src.offset == cfi->drap_offset) {
3092
3093 /* drap: mov disp(%rbp), %drap */
3094 cfa->base = cfi->drap_reg;
3095 cfa->offset = 0;
3096 cfi->drap_offset = -1;
3097 }
3098
3099 if (cfi->drap && op->src.reg == CFI_BP &&
3100 op->src.offset == regs[op->dest.reg].offset) {
3101
3102 /* drap: mov disp(%rbp), %reg */
3103 restore_reg(cfi, op->dest.reg);
3104
3105 } else if (op->src.reg == cfa->base &&
3106 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
3107
3108 /* mov disp(%rbp), %reg */
3109 /* mov disp(%rsp), %reg */
3110 restore_reg(cfi, op->dest.reg);
3111
3112 } else if (op->src.reg == CFI_SP &&
3113 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
3114
3115 /* mov disp(%rsp), %reg */
3116 restore_reg(cfi, op->dest.reg);
3117 }
3118
3119 break;
3120
3121 default:
3122 WARN_INSN(insn, "unknown stack-related instruction");
3123 return -1;
3124 }
3125
3126 break;
3127
3128 case OP_DEST_PUSH:
3129 case OP_DEST_PUSHF:
3130 cfi->stack_size += 8;
3131 if (cfa->base == CFI_SP)
3132 cfa->offset += 8;
3133
3134 if (op->src.type != OP_SRC_REG)
3135 break;
3136
3137 if (cfi->drap) {
3138 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3139
3140 /* drap: push %drap */
3141 cfa->base = CFI_BP_INDIRECT;
3142 cfa->offset = -cfi->stack_size;
3143
3144 /* save drap so we know when to restore it */
3145 cfi->drap_offset = -cfi->stack_size;
3146
3147 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
3148
3149 /* drap: push %rbp */
3150 cfi->stack_size = 0;
3151
3152 } else {
3153
3154 /* drap: push %reg */
3155 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
3156 }
3157
3158 } else {
3159
3160 /* push %reg */
3161 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
3162 }
3163
3164 /* detect when asm code uses rbp as a scratch register */
3165 if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
3166 cfa->base != CFI_BP)
3167 cfi->bp_scratch = true;
3168 break;
3169
3170 case OP_DEST_REG_INDIRECT:
3171
3172 if (cfi->drap) {
3173 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3174
3175 /* drap: mov %drap, disp(%rbp) */
3176 cfa->base = CFI_BP_INDIRECT;
3177 cfa->offset = op->dest.offset;
3178
3179 /* save drap offset so we know when to restore it */
3180 cfi->drap_offset = op->dest.offset;
3181 } else {
3182
3183 /* drap: mov reg, disp(%rbp) */
3184 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
3185 }
3186
3187 } else if (op->dest.reg == cfa->base) {
3188
3189 /* mov reg, disp(%rbp) */
3190 /* mov reg, disp(%rsp) */
3191 save_reg(cfi, op->src.reg, CFI_CFA,
3192 op->dest.offset - cfi->cfa.offset);
3193
3194 } else if (op->dest.reg == CFI_SP) {
3195
3196 /* mov reg, disp(%rsp) */
3197 save_reg(cfi, op->src.reg, CFI_CFA,
3198 op->dest.offset - cfi->stack_size);
3199
3200 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
3201
3202 /* mov %rsp, (%reg); # setup a stack swizzle. */
3203 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
3204 cfi->vals[op->dest.reg].offset = cfa->offset;
3205 }
3206
3207 break;
3208
3209 case OP_DEST_MEM:
3210 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
3211 WARN_INSN(insn, "unknown stack-related memory operation");
3212 return -1;
3213 }
3214
3215 /* pop mem */
3216 cfi->stack_size -= 8;
3217 if (cfa->base == CFI_SP)
3218 cfa->offset -= 8;
3219
3220 break;
3221
3222 default:
3223 WARN_INSN(insn, "unknown stack-related instruction");
3224 return -1;
3225 }
3226
3227 return 0;
3228 }
3229
3230 /*
3231 * The stack layouts of alternative instructions can sometimes diverge when
3232 * they have stack modifications. That's fine as long as the potential stack
3233 * layouts don't conflict at any given potential instruction boundary.
3234 *
3235 * Flatten the CFIs of the different alternative code streams (both original
3236 * and replacement) into a single shared CFI array which can be used to detect
3237 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3238 */
3239 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3240 {
3241 struct cfi_state **alt_cfi;
3242 int group_off;
3243
3244 if (!insn->alt_group)
3245 return 0;
3246
3247 if (!insn->cfi) {
3248 WARN("CFI missing");
3249 return -1;
3250 }
3251
3252 alt_cfi = insn->alt_group->cfi;
3253 group_off = insn->offset - insn->alt_group->first_insn->offset;
3254
3255 if (!alt_cfi[group_off]) {
3256 alt_cfi[group_off] = insn->cfi;
3257 } else {
3258 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3259 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3260 struct instruction *orig = orig_group->first_insn;
3261 char *where = offstr(insn->sec, insn->offset);
3262 WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
3263 free(where);
3264 return -1;
3265 }
3266 }
3267
3268 return 0;
3269 }
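
/*
 * Hypothetical example of the conflict detected above: if the original
 * and replacement streams disagree about the stack at a shared offset,
 *
 *	orig: push %rbx; nop	// at offset 1, stack_size has grown by 8
 *	alt:  nop; push %rbx	// at offset 1, stack_size is unchanged
 *
 * then no single ORC entry can describe that address, and cficmp()
 * flags the mismatch.
 */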
3270
3271 static int handle_insn_ops(struct instruction *insn,
3272 struct instruction *next_insn,
3273 struct insn_state *state)
3274 {
3275 struct stack_op *op;
3276
3277 for (op = insn->stack_ops; op; op = op->next) {
3278
3279 if (update_cfi_state(insn, next_insn, &state->cfi, op))
3280 return 1;
3281
3282 if (!insn->alt_group)
3283 continue;
3284
3285 if (op->dest.type == OP_DEST_PUSHF) {
3286 if (!state->uaccess_stack) {
3287 state->uaccess_stack = 1;
3288 } else if (state->uaccess_stack >> 31) {
3289 WARN_INSN(insn, "PUSHF stack exhausted");
3290 return 1;
3291 }
3292 state->uaccess_stack <<= 1;
3293 state->uaccess_stack |= state->uaccess;
3294 }
3295
3296 if (op->src.type == OP_SRC_POPF) {
3297 if (state->uaccess_stack) {
3298 state->uaccess = state->uaccess_stack & 1;
3299 state->uaccess_stack >>= 1;
3300 if (state->uaccess_stack == 1)
3301 state->uaccess_stack = 0;
3302 }
3303 }
3304 }
3305
3306 return 0;
3307 }
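
/*
 * The uaccess_stack handling above is a bit-stack with a sentinel bit.
 * A sketch, assuming an initially empty stack and state.uaccess == 1:
 *
 *	PUSHF: stack 0 -> 1 (sentinel), <<= 1, |= uaccess  -> 0b11
 *	PUSHF: 0b11 -> 0b111
 *	POPF:  uaccess = 0b111 & 1; stack >>= 1            -> 0b11
 *	POPF:  uaccess = 0b11 & 1;  stack >>= 1 == 1 (sentinel) -> 0
 *
 * so nested pushf/popf pairs restore the AC flag state in LIFO order.
 */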
3308
3309 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3310 {
3311 struct cfi_state *cfi1 = insn->cfi;
3312 int i;
3313
3314 if (!cfi1) {
3315 WARN("CFI missing");
3316 return false;
3317 }
3318
3319 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3320
3321 WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3322 cfi1->cfa.base, cfi1->cfa.offset,
3323 cfi2->cfa.base, cfi2->cfa.offset);
3324
3325 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3326 for (i = 0; i < CFI_NUM_REGS; i++) {
3327 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
3328 sizeof(struct cfi_reg)))
3329 continue;
3330
3331 WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3332 i, cfi1->regs[i].base, cfi1->regs[i].offset,
3333 i, cfi2->regs[i].base, cfi2->regs[i].offset);
3334 break;
3335 }
3336
3337 } else if (cfi1->type != cfi2->type) {
3338
3339 WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
3340 cfi1->type, cfi2->type);
3341
3342 } else if (cfi1->drap != cfi2->drap ||
3343 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3344 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3345
3346 WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3347 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3348 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3349
3350 } else
3351 return true;
3352
3353 return false;
3354 }
3355
3356 static inline bool func_uaccess_safe(struct symbol *func)
3357 {
3358 if (func)
3359 return func->uaccess_safe;
3360
3361 return false;
3362 }
3363
3364 static inline const char *call_dest_name(struct instruction *insn)
3365 {
3366 static char pvname[19];
3367 struct reloc *rel;
3368 int idx;
3369
3370 if (insn_call_dest(insn))
3371 return insn_call_dest(insn)->name;
3372
3373 rel = insn_reloc(NULL, insn);
3374 if (rel && !strcmp(rel->sym->name, "pv_ops")) {
3375 idx = (rel->addend / sizeof(void *));
3376 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3377 return pvname;
3378 }
3379
3380 return "{dynamic}";
3381 }
3382
3383 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3384 {
3385 struct symbol *target;
3386 struct reloc *rel;
3387 int idx;
3388
3389 rel = insn_reloc(file, insn);
3390 if (!rel || strcmp(rel->sym->name, "pv_ops"))
3391 return false;
3392
3393 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
3394
3395 if (file->pv_ops[idx].clean)
3396 return true;
3397
3398 file->pv_ops[idx].clean = true;
3399
3400 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3401 if (!target->sec->noinstr) {
3402 WARN("pv_ops[%d]: %s", idx, target->name);
3403 file->pv_ops[idx].clean = false;
3404 }
3405 }
3406
3407 return file->pv_ops[idx].clean;
3408 }
3409
3410 static inline bool noinstr_call_dest(struct objtool_file *file,
3411 struct instruction *insn,
3412 struct symbol *func)
3413 {
3414 /*
3415 * We can't deal with indirect function calls at present;
3416 * assume they're instrumented.
3417 */
3418 if (!func) {
3419 if (file->pv_ops)
3420 return pv_call_dest(file, insn);
3421
3422 return false;
3423 }
3424
3425 /*
3426 * If the symbol is from a noinstr section, we're good.
3427 */
3428 if (func->sec->noinstr)
3429 return true;
3430
3431 /*
3432 * If the symbol is a static_call trampoline, we can't tell.
3433 */
3434 if (func->static_call_tramp)
3435 return true;
3436
3437 /*
3438 * The __ubsan_handle_*() calls are like WARN(), they only happen when
3439 * something 'BAD' happened. At the risk of taking the machine down,
3440 * let them proceed to get the message out.
3441 */
3442 if (!strncmp(func->name, "__ubsan_handle_", 15))
3443 return true;
3444
3445 return false;
3446 }
3447
3448 static int validate_call(struct objtool_file *file,
3449 struct instruction *insn,
3450 struct insn_state *state)
3451 {
3452 if (state->noinstr && state->instr <= 0 &&
3453 !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3454 WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3455 return 1;
3456 }
3457
3458 if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3459 WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3460 return 1;
3461 }
3462
3463 if (state->df) {
3464 WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3465 return 1;
3466 }
3467
3468 return 0;
3469 }
3470
3471 static int validate_sibling_call(struct objtool_file *file,
3472 struct instruction *insn,
3473 struct insn_state *state)
3474 {
3475 if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
3476 WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
3477 return 1;
3478 }
3479
3480 return validate_call(file, insn, state);
3481 }
3482
3483 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3484 {
3485 if (state->noinstr && state->instr > 0) {
3486 WARN_INSN(insn, "return with instrumentation enabled");
3487 return 1;
3488 }
3489
3490 if (state->uaccess && !func_uaccess_safe(func)) {
3491 WARN_INSN(insn, "return with UACCESS enabled");
3492 return 1;
3493 }
3494
3495 if (!state->uaccess && func_uaccess_safe(func)) {
3496 WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3497 return 1;
3498 }
3499
3500 if (state->df) {
3501 WARN_INSN(insn, "return with DF set");
3502 return 1;
3503 }
3504
3505 if (func && has_modified_stack_frame(insn, state)) {
3506 WARN_INSN(insn, "return with modified stack frame");
3507 return 1;
3508 }
3509
3510 if (state->cfi.bp_scratch) {
3511 WARN_INSN(insn, "BP used as a scratch register");
3512 return 1;
3513 }
3514
3515 return 0;
3516 }
3517
3518 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3519 struct instruction *insn)
3520 {
3521 struct alt_group *alt_group = insn->alt_group;
3522
3523 /*
3524 * Simulate the fact that alternatives are patched in-place. When the
3525 * end of a replacement alt_group is reached, redirect objtool flow to
3526 * the end of the original alt_group.
3527 *
3528 * insn->alts->insn -> alt_group->first_insn
3529 * ...
3530 * alt_group->last_insn
3531 * [alt_group->nop] -> next(orig_group->last_insn)
3532 */
3533 if (alt_group) {
3534 if (alt_group->nop) {
3535 /* ->nop implies ->orig_group */
3536 if (insn == alt_group->last_insn)
3537 return alt_group->nop;
3538 if (insn == alt_group->nop)
3539 goto next_orig;
3540 }
3541 if (insn == alt_group->last_insn && alt_group->orig_group)
3542 goto next_orig;
3543 }
3544
3545 return next_insn_same_sec(file, insn);
3546
3547 next_orig:
3548 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3549 }
3550
3551 /*
3552 * Follow the branch starting at the given instruction, and recursively follow
3553 * any other branches (jumps). Meanwhile, track the frame pointer state at
3554 * each instruction and validate all the rules described in
3555 * tools/objtool/Documentation/objtool.txt.
3556 */
3557 static int validate_branch(struct objtool_file *file, struct symbol *func,
3558 struct instruction *insn, struct insn_state state)
3559 {
3560 struct alternative *alt;
3561 struct instruction *next_insn, *prev_insn = NULL;
3562 struct section *sec;
3563 u8 visited;
3564 int ret;
3565
3566 sec = insn->sec;
3567
3568 while (1) {
3569 next_insn = next_insn_to_validate(file, insn);
3570
3571 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3572 /* Ignore KCFI type preambles, which always fall through */
3573 if (!strncmp(func->name, "__cfi_", 6) ||
3574 !strncmp(func->name, "__pfx_", 6))
3575 return 0;
3576
3577 WARN("%s() falls through to next function %s()",
3578 func->name, insn_func(insn)->name);
3579 return 1;
3580 }
3581
3582 if (func && insn->ignore) {
3583 WARN_INSN(insn, "BUG: why am I validating an ignored function?");
3584 return 1;
3585 }
3586
3587 visited = VISITED_BRANCH << state.uaccess;
3588 if (insn->visited & VISITED_BRANCH_MASK) {
3589 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3590 return 1;
3591
3592 if (insn->visited & visited)
3593 return 0;
3594 } else {
3595 nr_insns_visited++;
3596 }
3597
3598 if (state.noinstr)
3599 state.instr += insn->instr;
3600
3601 if (insn->hint) {
3602 if (insn->restore) {
3603 struct instruction *save_insn, *i;
3604
3605 i = insn;
3606 save_insn = NULL;
3607
3608 sym_for_each_insn_continue_reverse(file, func, i) {
3609 if (i->save) {
3610 save_insn = i;
3611 break;
3612 }
3613 }
3614
3615 if (!save_insn) {
3616 WARN_INSN(insn, "no corresponding CFI save for CFI restore");
3617 return 1;
3618 }
3619
3620 if (!save_insn->visited) {
3621 WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
3622 return 1;
3623 }
3624
3625 insn->cfi = save_insn->cfi;
3626 nr_cfi_reused++;
3627 }
3628
3629 state.cfi = *insn->cfi;
3630 } else {
3631 /* XXX track if we actually changed state.cfi */
3632
3633 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3634 insn->cfi = prev_insn->cfi;
3635 nr_cfi_reused++;
3636 } else {
3637 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3638 }
3639 }
3640
3641 insn->visited |= visited;
3642
3643 if (propagate_alt_cfi(file, insn))
3644 return 1;
3645
3646 if (!insn->ignore_alts && insn->alts) {
3647 bool skip_orig = false;
3648
3649 for (alt = insn->alts; alt; alt = alt->next) {
3650 if (alt->skip_orig)
3651 skip_orig = true;
3652
3653 ret = validate_branch(file, func, alt->insn, state);
3654 if (ret) {
3655 if (opts.backtrace)
3656 BT_FUNC("(alt)", insn);
3657 return ret;
3658 }
3659 }
3660
3661 if (skip_orig)
3662 return 0;
3663 }
3664
3665 if (handle_insn_ops(insn, next_insn, &state))
3666 return 1;
3667
3668 switch (insn->type) {
3669
3670 case INSN_RETURN:
3671 return validate_return(func, insn, &state);
3672
3673 case INSN_CALL:
3674 case INSN_CALL_DYNAMIC:
3675 ret = validate_call(file, insn, &state);
3676 if (ret)
3677 return ret;
3678
3679 if (opts.stackval && func && !is_fentry_call(insn) &&
3680 !has_valid_stack_frame(&state)) {
3681 WARN_INSN(insn, "call without frame pointer save/setup");
3682 return 1;
3683 }
3684
3685 if (insn->dead_end)
3686 return 0;
3687
3688 break;
3689
3690 case INSN_JUMP_CONDITIONAL:
3691 case INSN_JUMP_UNCONDITIONAL:
3692 if (is_sibling_call(insn)) {
3693 ret = validate_sibling_call(file, insn, &state);
3694 if (ret)
3695 return ret;
3696
3697 } else if (insn->jump_dest) {
3698 ret = validate_branch(file, func,
3699 insn->jump_dest, state);
3700 if (ret) {
3701 if (opts.backtrace)
3702 BT_FUNC("(branch)", insn);
3703 return ret;
3704 }
3705 }
3706
3707 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3708 return 0;
3709
3710 break;
3711
3712 case INSN_JUMP_DYNAMIC:
3713 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3714 if (is_sibling_call(insn)) {
3715 ret = validate_sibling_call(file, insn, &state);
3716 if (ret)
3717 return ret;
3718 }
3719
3720 if (insn->type == INSN_JUMP_DYNAMIC)
3721 return 0;
3722
3723 break;
3724
3725 case INSN_CONTEXT_SWITCH:
3726 if (func && (!next_insn || !next_insn->hint)) {
3727 WARN_INSN(insn, "unsupported instruction in callable function");
3728 return 1;
3729 }
3730 return 0;
3731
3732 case INSN_STAC:
3733 if (state.uaccess) {
3734 WARN_INSN(insn, "recursive UACCESS enable");
3735 return 1;
3736 }
3737
3738 state.uaccess = true;
3739 break;
3740
3741 case INSN_CLAC:
3742 if (!state.uaccess && func) {
3743 WARN_INSN(insn, "redundant UACCESS disable");
3744 return 1;
3745 }
3746
3747 if (func_uaccess_safe(func) && !state.uaccess_stack) {
3748 WARN_INSN(insn, "UACCESS-safe disables UACCESS");
3749 return 1;
3750 }
3751
3752 state.uaccess = false;
3753 break;
3754
3755 case INSN_STD:
3756 if (state.df) {
3757 WARN_INSN(insn, "recursive STD");
3758 return 1;
3759 }
3760
3761 state.df = true;
3762 break;
3763
3764 case INSN_CLD:
3765 if (!state.df && func) {
3766 WARN_INSN(insn, "redundant CLD");
3767 return 1;
3768 }
3769
3770 state.df = false;
3771 break;
3772
3773 default:
3774 break;
3775 }
3776
3777 if (insn->dead_end)
3778 return 0;
3779
3780 if (!next_insn) {
3781 if (state.cfi.cfa.base == CFI_UNDEFINED)
3782 return 0;
3783 WARN("%s: unexpected end of section", sec->name);
3784 return 1;
3785 }
3786
3787 prev_insn = insn;
3788 insn = next_insn;
3789 }
3790
3791 return 0;
3792 }
3793
3794 static int validate_unwind_hint(struct objtool_file *file,
3795 struct instruction *insn,
3796 struct insn_state *state)
3797 {
3798 if (insn->hint && !insn->visited && !insn->ignore) {
3799 int ret = validate_branch(file, insn_func(insn), insn, *state);
3800 if (ret && opts.backtrace)
3801 BT_FUNC("<=== (hint)", insn);
3802 return ret;
3803 }
3804
3805 return 0;
3806 }
3807
3808 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3809 {
3810 struct instruction *insn;
3811 struct insn_state state;
3812 int warnings = 0;
3813
3814 if (!file->hints)
3815 return 0;
3816
3817 init_insn_state(file, &state, sec);
3818
3819 if (sec) {
3820 sec_for_each_insn(file, sec, insn)
3821 warnings += validate_unwind_hint(file, insn, &state);
3822 } else {
3823 for_each_insn(file, insn)
3824 warnings += validate_unwind_hint(file, insn, &state);
3825 }
3826
3827 return warnings;
3828 }
3829
3830 /*
3831 * Validate rethunk entry constraint: must untrain RET before the first RET.
3832 *
3833 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
3834 * before an actual RET instruction.
3835 */
3836 static int validate_unret(struct objtool_file *file, struct instruction *insn)
3837 {
3838 struct instruction *next, *dest;
3839 int ret, warnings = 0;
3840
3841 for (;;) {
3842 next = next_insn_to_validate(file, insn);
3843
3844 if (insn->visited & VISITED_UNRET)
3845 return 0;
3846
3847 insn->visited |= VISITED_UNRET;
3848
3849 if (!insn->ignore_alts && insn->alts) {
3850 struct alternative *alt;
3851 bool skip_orig = false;
3852
3853 for (alt = insn->alts; alt; alt = alt->next) {
3854 if (alt->skip_orig)
3855 skip_orig = true;
3856
3857 ret = validate_unret(file, alt->insn);
3858 if (ret) {
3859 if (opts.backtrace)
3860 BT_FUNC("(alt)", insn);
3861 return ret;
3862 }
3863 }
3864
3865 if (skip_orig)
3866 return 0;
3867 }
3868
3869 switch (insn->type) {
3870
3871 case INSN_CALL_DYNAMIC:
3872 case INSN_JUMP_DYNAMIC:
3873 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3874 WARN_INSN(insn, "early indirect call");
3875 return 1;
3876
3877 case INSN_JUMP_UNCONDITIONAL:
3878 case INSN_JUMP_CONDITIONAL:
3879 if (!is_sibling_call(insn)) {
3880 if (!insn->jump_dest) {
3881 WARN_INSN(insn, "unresolved jump target after linking?!?");
3882 return -1;
3883 }
3884 ret = validate_unret(file, insn->jump_dest);
3885 if (ret) {
3886 if (opts.backtrace) {
3887 BT_FUNC("(branch%s)", insn,
3888 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3889 }
3890 return ret;
3891 }
3892
3893 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3894 return 0;
3895
3896 break;
3897 }
3898
3899 /* fallthrough */
3900 case INSN_CALL:
3901 dest = find_insn(file, insn_call_dest(insn)->sec,
3902 insn_call_dest(insn)->offset);
3903 if (!dest) {
3904 WARN("Unresolved function after linking!?: %s",
3905 insn_call_dest(insn)->name);
3906 return -1;
3907 }
3908
3909 ret = validate_unret(file, dest);
3910 if (ret) {
3911 if (opts.backtrace)
3912 BT_FUNC("(call)", insn);
3913 return ret;
3914 }
3915 /*
3916 * If a call returns without error, it must have seen UNTRAIN_RET.
3917 * Therefore any non-error return is a success.
3918 */
3919 return 0;
3920
3921 case INSN_RETURN:
3922 WARN_INSN(insn, "RET before UNTRAIN");
3923 return 1;
3924
3925 case INSN_NOP:
3926 if (insn->retpoline_safe)
3927 return 0;
3928 break;
3929
3930 default:
3931 break;
3932 }
3933
3934 if (!next) {
3935 WARN_INSN(insn, "unexpected end of section");
3936 return -1;
3937 }
3938 insn = next;
3939 }
3940
3941 return warnings;
3942 }
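
/*
 * Sketch of what this walk enforces (macro names per the x86 entry
 * code): a path marked VALIDATE_UNRET_BEGIN, e.g. syscall entry, must
 * execute
 *
 *	UNTRAIN_RET	// contains VALIDATE_UNRET_END
 *	...
 *	ret		// only legal after the untraining above
 *
 * and any RET or indirect branch reachable before the UNTRAIN_RET is
 * reported.
 */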
3943
3944 /*
3945 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
3946 * VALIDATE_UNRET_END before RET.
3947 */
3948 static int validate_unrets(struct objtool_file *file)
3949 {
3950 struct instruction *insn;
3951 int ret, warnings = 0;
3952
3953 for_each_insn(file, insn) {
3954 if (!insn->unret)
3955 continue;
3956
3957 ret = validate_unret(file, insn);
3958 if (ret < 0) {
3959 WARN_INSN(insn, "Failed UNRET validation");
3960 return ret;
3961 }
3962 warnings += ret;
3963 }
3964
3965 return warnings;
3966 }
3967
3968 static int validate_retpoline(struct objtool_file *file)
3969 {
3970 struct instruction *insn;
3971 int warnings = 0;
3972
3973 for_each_insn(file, insn) {
3974 if (insn->type != INSN_JUMP_DYNAMIC &&
3975 insn->type != INSN_CALL_DYNAMIC &&
3976 insn->type != INSN_RETURN)
3977 continue;
3978
3979 if (insn->retpoline_safe)
3980 continue;
3981
3982 if (insn->sec->init)
3983 continue;
3984
3985 if (insn->type == INSN_RETURN) {
3986 if (opts.rethunk) {
3987 WARN_INSN(insn, "'naked' return found in RETHUNK build");
3988 } else
3989 continue;
3990 } else {
3991 WARN_INSN(insn, "indirect %s found in RETPOLINE build",
3992 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3993 }
3994
3995 warnings++;
3996 }
3997
3998 return warnings;
3999 }
4000
4001 static bool is_kasan_insn(struct instruction *insn)
4002 {
4003 return (insn->type == INSN_CALL &&
4004 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
4005 }
4006
4007 static bool is_ubsan_insn(struct instruction *insn)
4008 {
4009 return (insn->type == INSN_CALL &&
4010 !strcmp(insn_call_dest(insn)->name,
4011 "__ubsan_handle_builtin_unreachable"));
4012 }
4013
4014 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
4015 {
4016 int i;
4017 struct instruction *prev_insn;
4018
4019 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
4020 return true;
4021
4022 /*
4023 * Ignore alternative replacement instructions. This can happen
4024 * when a whitelisted function uses one of the ALTERNATIVE macros.
4025 */
4026 if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
4027 !strcmp(insn->sec->name, ".altinstr_aux"))
4028 return true;
4029
4030 /*
4031 * Whole archive runs might encounter dead code from weak symbols.
4032 * This is where the linker will have dropped the weak symbol in
4033 * favour of a regular symbol, but leaves the code in place.
4034 *
4035 * In this case we'll find a piece of code (whole function) that is not
4036 * covered by a !section symbol. Ignore them.
4037 */
4038 if (opts.link && !insn_func(insn)) {
4039 int size = find_symbol_hole_containing(insn->sec, insn->offset);
4040 unsigned long end = insn->offset + size;
4041
4042 if (!size) /* not a hole */
4043 return false;
4044
4045 if (size < 0) /* hole until the end */
4046 return true;
4047
4048 sec_for_each_insn_continue(file, insn) {
4049 /*
4050 * If we reach a visited instruction at or before the
4051 * end of the hole, ignore the unreachable.
4052 */
4053 if (insn->visited)
4054 return true;
4055
4056 if (insn->offset >= end)
4057 break;
4058
4059 /*
4060 * If this hole jumps to a .cold function, mark it ignore too.
4061 */
4062 if (insn->jump_dest && insn_func(insn->jump_dest) &&
4063 strstr(insn_func(insn->jump_dest)->name, ".cold")) {
4064 struct instruction *dest = insn->jump_dest;
4065 func_for_each_insn(file, insn_func(dest), dest)
4066 dest->ignore = true;
4067 }
4068 }
4069
4070 return false;
4071 }
4072
4073 if (!insn_func(insn))
4074 return false;
4075
4076 if (insn_func(insn)->static_call_tramp)
4077 return true;
4078
4079 /*
4080 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4081 * __builtin_unreachable(). The BUG() macro has an unreachable() after
4082 * the UD2, which causes GCC's undefined trap logic to emit another UD2
4083 * (or occasionally a JMP to UD2).
4084 *
4085 * It may also insert a UD2 after calling a __noreturn function.
4086 */
4087 prev_insn = prev_insn_same_sec(file, insn);
4088 if (prev_insn && prev_insn->dead_end &&
4089 (insn->type == INSN_BUG ||
4090 (insn->type == INSN_JUMP_UNCONDITIONAL &&
4091 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
4092 return true;
4093
4094 /*
4095 * Check if this (or a subsequent) instruction is related to
4096 * CONFIG_UBSAN or CONFIG_KASAN.
4097 *
4098 * End the search at 5 instructions to avoid going into the weeds.
4099 */
4100 for (i = 0; i < 5; i++) {
4101
4102 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
4103 return true;
4104
4105 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
4106 if (insn->jump_dest &&
4107 insn_func(insn->jump_dest) == insn_func(insn)) {
4108 insn = insn->jump_dest;
4109 continue;
4110 }
4111
4112 break;
4113 }
4114
4115 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
4116 break;
4117
4118 insn = next_insn_same_sec(file, insn);
4119 }
4120
4121 return false;
4122 }
4123
4124 static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
4125 {
4126 struct instruction *insn, *prev;
4127 struct cfi_state *cfi;
4128
4129 insn = find_insn(file, func->sec, func->offset);
4130 if (!insn)
4131 return -1;
4132
4133 for (prev = prev_insn_same_sec(file, insn);
4134 prev;
4135 prev = prev_insn_same_sec(file, prev)) {
4136 u64 offset;
4137
4138 if (prev->type != INSN_NOP)
4139 return -1;
4140
4141 offset = func->offset - prev->offset;
4142
4143 if (offset > opts.prefix)
4144 return -1;
4145
4146 if (offset < opts.prefix)
4147 continue;
4148
4149 elf_create_prefix_symbol(file->elf, func, opts.prefix);
4150 break;
4151 }
4152
4153 if (!prev)
4154 return -1;
4155
4156 if (!insn->cfi) {
4157 /*
4158 * This can happen if stack validation isn't enabled or the
4159 * function is annotated with STACK_FRAME_NON_STANDARD.
4160 */
4161 return 0;
4162 }
4163
4164 /* Propagate insn->cfi to the prefix code */
4165 cfi = cfi_hash_find_or_add(insn->cfi);
4166 for (; prev != insn; prev = next_insn_same_sec(file, prev))
4167 prev->cfi = cfi;
4168
4169 return 0;
4170 }
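
/*
 * Layout being created above (assuming opts.prefix == 16):
 *
 *	__pfx_foo:	// added by elf_create_prefix_symbol()
 *	  nops * 16	// e.g. -fpatchable-function-entry padding
 *	foo:
 *	  ...
 *
 * The prefix NOPs inherit foo()'s entry CFI so an IP landing inside the
 * padding (e.g. from a call-depth-tracking thunk) can still be unwound.
 */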
4171
4172 static int add_prefix_symbols(struct objtool_file *file)
4173 {
4174 struct section *sec;
4175 struct symbol *func;
4176 int warnings = 0;
4177
4178 for_each_sec(file, sec) {
4179 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4180 continue;
4181
4182 sec_for_each_sym(sec, func) {
4183 if (func->type != STT_FUNC)
4184 continue;
4185
4186 add_prefix_symbol(file, func);
4187 }
4188 }
4189
4190 return warnings;
4191 }
4192
4193 static int validate_symbol(struct objtool_file *file, struct section *sec,
4194 struct symbol *sym, struct insn_state *state)
4195 {
4196 struct instruction *insn;
4197 int ret;
4198
4199 if (!sym->len) {
4200 WARN("%s() is missing an ELF size annotation", sym->name);
4201 return 1;
4202 }
4203
4204 if (sym->pfunc != sym || sym->alias != sym)
4205 return 0;
4206
4207 insn = find_insn(file, sec, sym->offset);
4208 if (!insn || insn->ignore || insn->visited)
4209 return 0;
4210
4211 state->uaccess = sym->uaccess_safe;
4212
4213 ret = validate_branch(file, insn_func(insn), insn, *state);
4214 if (ret && opts.backtrace)
4215 BT_FUNC("<=== (sym)", insn);
4216 return ret;
4217 }
4218
4219 static int validate_section(struct objtool_file *file, struct section *sec)
4220 {
4221 struct insn_state state;
4222 struct symbol *func;
4223 int warnings = 0;
4224
4225 sec_for_each_sym(sec, func) {
4226 if (func->type != STT_FUNC)
4227 continue;
4228
4229 init_insn_state(file, &state, sec);
4230 set_func_state(&state.cfi);
4231
4232 warnings += validate_symbol(file, sec, func, &state);
4233 }
4234
4235 return warnings;
4236 }
4237
4238 static int validate_noinstr_sections(struct objtool_file *file)
4239 {
4240 struct section *sec;
4241 int warnings = 0;
4242
4243 sec = find_section_by_name(file->elf, ".noinstr.text");
4244 if (sec) {
4245 warnings += validate_section(file, sec);
4246 warnings += validate_unwind_hints(file, sec);
4247 }
4248
4249 sec = find_section_by_name(file->elf, ".entry.text");
4250 if (sec) {
4251 warnings += validate_section(file, sec);
4252 warnings += validate_unwind_hints(file, sec);
4253 }
4254
4255 sec = find_section_by_name(file->elf, ".cpuidle.text");
4256 if (sec) {
4257 warnings += validate_section(file, sec);
4258 warnings += validate_unwind_hints(file, sec);
4259 }
4260
4261 return warnings;
4262 }
4263
4264 static int validate_functions(struct objtool_file *file)
4265 {
4266 struct section *sec;
4267 int warnings = 0;
4268
4269 for_each_sec(file, sec) {
4270 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4271 continue;
4272
4273 warnings += validate_section(file, sec);
4274 }
4275
4276 return warnings;
4277 }
4278
4279 static void mark_endbr_used(struct instruction *insn)
4280 {
4281 if (!list_empty(&insn->call_node))
4282 list_del_init(&insn->call_node);
4283 }
4284
4285 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4286 {
4287 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4288 struct instruction *first;
4289
4290 if (!sym)
4291 return false;
4292
4293 first = find_insn(file, sym->sec, sym->offset);
4294 if (!first)
4295 return false;
4296
4297 if (first->type != INSN_ENDBR && !first->noendbr)
4298 return false;
4299
4300 return insn->offset == sym->offset + sym->len;
4301 }
4302
4303 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
4304 {
4305 struct instruction *dest;
4306 struct reloc *reloc;
4307 unsigned long off;
4308 int warnings = 0;
4309
4310 /*
4311 * Looking for function pointer load relocations. Ignore
4312 * direct/indirect branches:
4313 */
4314 switch (insn->type) {
4315 case INSN_CALL:
4316 case INSN_CALL_DYNAMIC:
4317 case INSN_JUMP_CONDITIONAL:
4318 case INSN_JUMP_UNCONDITIONAL:
4319 case INSN_JUMP_DYNAMIC:
4320 case INSN_JUMP_DYNAMIC_CONDITIONAL:
4321 case INSN_RETURN:
4322 case INSN_NOP:
4323 return 0;
4324 default:
4325 break;
4326 }
4327
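/*
 * Illustrative (hypothetical) example: a function pointer load like
 *
 *	lea	my_func(%rip), %rax
 *
 * carries a relocation against my_func, whose address may later be used
 * for an indirect call, so my_func must start with ENDBR. A direct
 * "call my_func" (INSN_CALL, filtered above) needs no ENDBR at its
 * target.
 */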
4328 for (reloc = insn_reloc(file, insn);
4329 reloc;
4330 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
4331 reloc->offset + 1,
4332 (insn->offset + insn->len) - (reloc->offset + 1))) {
4333
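/*
 * The range arithmetic above resumes the scan at the byte after the
 * previous reloc and runs to the end of the instruction, so an
 * instruction carrying several relocations gets every one of them
 * checked.
 */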
4334 /*
4335 * static_call_update() references the trampoline, which
4336 * doesn't have (or need) ENDBR. Skip the warning in that case.
4337 */
4338 if (reloc->sym->static_call_tramp)
4339 continue;
4340
4341 off = reloc->sym->offset;
4342 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
4343 off += arch_dest_reloc_offset(reloc->addend);
4344 else
4345 off += reloc->addend;
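/*
 * Worked example (x86-64 assumption): PC-relative relocation addends
 * are biased by -4 because %rip points past the 4-byte displacement;
 * arch_dest_reloc_offset() adds those 4 bytes back, so a PC32 reloc of
 * "my_func - 4" resolves to exactly my_func's sym->offset.
 */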
4346
4347 dest = find_insn(file, reloc->sym->sec, off);
4348 if (!dest)
4349 continue;
4350
4351 if (dest->type == INSN_ENDBR) {
4352 mark_endbr_used(dest);
4353 continue;
4354 }
4355
4356 if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
4357 /*
4358 * Any reference from a function to itself is either
4359 * _THIS_IP_ or IRET-to-self.
4360 *
4361 * There is no sane way to annotate _THIS_IP_: the
4362 * compiler treats the relocation as a constant and
4363 * happily folds in offsets, skewing any annotation we
4364 * add and leading to vast numbers of false positives.
4365 *
4366 * There is also compiler-generated _THIS_IP_, through
4367 * KCOV and the like, which we have no hope of annotating.
4368 *
4369 * As such, blanket-accept self-references
4370 * unconditionally.
4371 */
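/*
 * For reference, _THIS_IP_ is (assuming the usual kernel definition)
 * an address-of-label expression:
 *
 *	({ __label__ __here; __here: (unsigned long)&&__here; })
 *
 * which is precisely such a self-referencing relocation.
 */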
4372 continue;
4373 }
4374
4375 /*
4376 * Accept anything ANNOTATE_NOENDBR.
4377 */
4378 if (dest->noendbr)
4379 continue;
4380
4381 /*
4382 * Accept if this is the instruction just past a symbol
4383 * whose start is (no)endbr -- typical code-range usage.
4384 */
4385 if (noendbr_range(file, dest))
4386 continue;
4387
4388 WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4389
4390 warnings++;
4391 }
4392
4393 return warnings;
4394 }
4395
4396 static int validate_ibt_data_reloc(struct objtool_file *file,
4397 struct reloc *reloc)
4398 {
4399 struct instruction *dest;
4400
4401 dest = find_insn(file, reloc->sym->sec,
4402 reloc->sym->offset + reloc->addend);
4403 if (!dest)
4404 return 0;
4405
4406 if (dest->type == INSN_ENDBR) {
4407 mark_endbr_used(dest);
4408 return 0;
4409 }
4410
4411 if (dest->noendbr)
4412 return 0;
4413
4414 WARN_FUNC("data relocation to !ENDBR: %s",
4415 reloc->sec->base, reloc->offset,
4416 offstr(dest->sec, dest->offset));
4417
4418 return 1;
4419 }
4420
4421 /*
4422 * Validate IBT rules and remove used ENDBR instructions from the seal list.
4423 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4424 * NOPs) later, in create_ibt_endbr_seal_sections().
4425 */
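/*
 * (Sketch of the intent, assuming x86-64: sealing lets the kernel later
 * overwrite an unused 4-byte endbr64 with a same-length NOP, removing
 * it as a valid IBT landing pad. The poke itself happens in the kernel
 * proper, not in objtool.)
 */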
4426 static int validate_ibt(struct objtool_file *file)
4427 {
4428 struct section *sec;
4429 struct reloc *reloc;
4430 struct instruction *insn;
4431 int warnings = 0;
4432
4433 for_each_insn(file, insn)
4434 warnings += validate_ibt_insn(file, insn);
4435
4436 for_each_sec(file, sec) {
4437
4438 /* Already done by validate_ibt_insn() */
4439 if (sec->sh.sh_flags & SHF_EXECINSTR)
4440 continue;
4441
4442 if (!sec->reloc)
4443 continue;
4444
4445 /*
4446 * These sections can reference text addresses, but not with
4447 * the intent to indirect branch to them.
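 * For example, __bug_table records the trapping instruction of each
 * BUG() and __mcount_loc the ftrace call sites; neither address is
 * ever used as an indirect branch target.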
4448 */
4449 if ((!strncmp(sec->name, ".discard", 8) &&
4450 strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
4451 !strncmp(sec->name, ".debug", 6) ||
4452 !strcmp(sec->name, ".altinstructions") ||
4453 !strcmp(sec->name, ".ibt_endbr_seal") ||
4454 !strcmp(sec->name, ".orc_unwind_ip") ||
4455 !strcmp(sec->name, ".parainstructions") ||
4456 !strcmp(sec->name, ".retpoline_sites") ||
4457 !strcmp(sec->name, ".smp_locks") ||
4458 !strcmp(sec->name, ".static_call_sites") ||
4459 !strcmp(sec->name, "_error_injection_whitelist") ||
4460 !strcmp(sec->name, "_kprobe_blacklist") ||
4461 !strcmp(sec->name, "__bug_table") ||
4462 !strcmp(sec->name, "__ex_table") ||
4463 !strcmp(sec->name, "__jump_table") ||
4464 !strcmp(sec->name, "__mcount_loc") ||
4465 !strcmp(sec->name, ".kcfi_traps") ||
4466 strstr(sec->name, "__patchable_function_entries"))
4467 continue;
4468
4469 list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
4470 warnings += validate_ibt_data_reloc(file, reloc);
4471 }
4472
4473 return warnings;
4474 }
4475
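/*
 * Straight-Line-Speculation mitigation: every RET and indirect JMP must
 * be followed by a trap, e.g. (illustrative asm):
 *
 *	ret
 *	int3		// INSN_TRAP: stops speculation past the ret
 */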
4476 static int validate_sls(struct objtool_file *file)
4477 {
4478 struct instruction *insn, *next_insn;
4479 int warnings = 0;
4480
4481 for_each_insn(file, insn) {
4482 next_insn = next_insn_same_sec(file, insn);
4483
4484 if (insn->retpoline_safe)
4485 continue;
4486
4487 switch (insn->type) {
4488 case INSN_RETURN:
4489 if (!next_insn || next_insn->type != INSN_TRAP) {
4490 WARN_INSN(insn, "missing int3 after ret");
4491 warnings++;
4492 }
4493
4494 break;
4495 case INSN_JUMP_DYNAMIC:
4496 if (!next_insn || next_insn->type != INSN_TRAP) {
4497 WARN_INSN(insn, "missing int3 after indirect jump");
4498 warnings++;
4499 }
4500 break;
4501 default:
4502 break;
4503 }
4504 }
4505
4506 return warnings;
4507 }
4508
4509 static int validate_reachable_instructions(struct objtool_file *file)
4510 {
4511 struct instruction *insn;
4512
4513 if (file->ignore_unreachables)
4514 return 0;
4515
4516 for_each_insn(file, insn) {
4517 if (insn->visited || ignore_unreachable_insn(file, insn))
4518 continue;
4519
4520 WARN_INSN(insn, "unreachable instruction");
4521 return 1;
4522 }
4523
4524 return 0;
4525 }
4526
4527 int check(struct objtool_file *file)
4528 {
4529 int ret, warnings = 0;
4530
4531 arch_initial_func_cfi_state(&initial_func_cfi);
4532 init_cfi_state(&init_cfi);
4533 init_cfi_state(&func_cfi);
4534 set_func_state(&func_cfi);
4535
4536 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
4537 goto out;
4538
4539 cfi_hash_add(&init_cfi);
4540 cfi_hash_add(&func_cfi);
4541
4542 ret = decode_sections(file);
4543 if (ret < 0)
4544 goto out;
4545
4546 warnings += ret;
4547
4548 if (!nr_insns)
4549 goto out;
4550
4551 if (opts.retpoline) {
4552 ret = validate_retpoline(file);
4553 if (ret < 0)
4554 return ret;
4555 warnings += ret;
4556 }
4557
4558 if (opts.stackval || opts.orc || opts.uaccess) {
4559 ret = validate_functions(file);
4560 if (ret < 0)
4561 goto out;
4562 warnings += ret;
4563
4564 ret = validate_unwind_hints(file, NULL);
4565 if (ret < 0)
4566 goto out;
4567 warnings += ret;
4568
4569 if (!warnings) {
4570 ret = validate_reachable_instructions(file);
4571 if (ret < 0)
4572 goto out;
4573 warnings += ret;
4574 }
4575
4576 } else if (opts.noinstr) {
4577 ret = validate_noinstr_sections(file);
4578 if (ret < 0)
4579 goto out;
4580 warnings += ret;
4581 }
4582
4583 if (opts.unret) {
4584 /*
4585 * Must run after validate_branch() and friends, since
4586 * it plays further games with insn->visited.
4587 */
4588 ret = validate_unrets(file);
4589 if (ret < 0)
4590 return ret;
4591 warnings += ret;
4592 }
4593
4594 if (opts.ibt) {
4595 ret = validate_ibt(file);
4596 if (ret < 0)
4597 goto out;
4598 warnings += ret;
4599 }
4600
4601 if (opts.sls) {
4602 ret = validate_sls(file);
4603 if (ret < 0)
4604 goto out;
4605 warnings += ret;
4606 }
4607
4608 if (opts.static_call) {
4609 ret = create_static_call_sections(file);
4610 if (ret < 0)
4611 goto out;
4612 warnings += ret;
4613 }
4614
4615 if (opts.retpoline) {
4616 ret = create_retpoline_sites_sections(file);
4617 if (ret < 0)
4618 goto out;
4619 warnings += ret;
4620 }
4621
4622 if (opts.cfi) {
4623 ret = create_cfi_sections(file);
4624 if (ret < 0)
4625 goto out;
4626 warnings += ret;
4627 }
4628
4629 if (opts.rethunk) {
4630 ret = create_return_sites_sections(file);
4631 if (ret < 0)
4632 goto out;
4633 warnings += ret;
4634
4635 if (opts.hack_skylake) {
4636 ret = create_direct_call_sections(file);
4637 if (ret < 0)
4638 goto out;
4639 warnings += ret;
4640 }
4641 }
4642
4643 if (opts.mcount) {
4644 ret = create_mcount_loc_sections(file);
4645 if (ret < 0)
4646 goto out;
4647 warnings += ret;
4648 }
4649
4650 if (opts.prefix) {
4651 ret = add_prefix_symbols(file);
4652 if (ret < 0)
4653 return ret;
4654 warnings += ret;
4655 }
4656
4657 if (opts.ibt) {
4658 ret = create_ibt_endbr_seal_sections(file);
4659 if (ret < 0)
4660 goto out;
4661 warnings += ret;
4662 }
4663
4664 if (opts.orc && nr_insns) {
4665 ret = orc_create(file);
4666 if (ret < 0)
4667 goto out;
4668 warnings += ret;
4669 }
4670
4672 if (opts.stats) {
4673 printf("nr_insns_visited: %ld\n", nr_insns_visited);
4674 printf("nr_cfi: %ld\n", nr_cfi);
4675 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
4676 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
4677 }
4678
4679 out:
4680 /*
4681 * For now, don't fail the kernel build on fatal warnings. These
4682 * errors are still fairly common due to the growing matrix of
4683 * supported toolchains and their recent pace of change.
4684 */
4685 return 0;
4686 }