// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <sys/mman.h>

#include <arch/elf.h>
#include <objtool/builtin.h>
#include <objtool/cfi.h>
#include <objtool/arch.h>
#include <objtool/check.h>
#include <objtool/special.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>

#include <linux/objtool_types.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/static_call_types.h>

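/*
 * An alternate instruction sequence attached to an instruction via its
 * insn->alts list; validate_branch() explores each entry, and skip_orig
 * tells it not to follow the original instruction when an alternative is
 * taken.
 */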
struct alternative {
	struct alternative *next;
	struct instruction *insn;
	bool skip_orig;
};

static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;

static struct cfi_init_state initial_func_cfi;
static struct cfi_state init_cfi;
static struct cfi_state func_cfi;

struct instruction *find_insn(struct objtool_file *file,
			      struct section *sec, unsigned long offset)
{
	struct instruction *insn;

	hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
		if (insn->sec == sec && insn->offset == offset)
			return insn;
	}

	return NULL;
}

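/*
 * Instructions are stored in contiguous chunks of INSN_CHUNK_SIZE (see
 * decode_instructions()), so within a chunk the next instruction is just
 * insn + 1; only at a chunk boundary (idx == INSN_CHUNK_MAX) do we fall
 * back to a hash lookup.
 */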
struct instruction *next_insn_same_sec(struct objtool_file *file,
				       struct instruction *insn)
{
	if (insn->idx == INSN_CHUNK_MAX)
		return find_insn(file, insn->sec, insn->offset + insn->len);

	insn++;
	if (!insn->len)
		return NULL;

	return insn;
}

static struct instruction *next_insn_same_func(struct objtool_file *file,
					       struct instruction *insn)
{
	struct instruction *next = next_insn_same_sec(file, insn);
	struct symbol *func = insn_func(insn);

	if (!func)
		return NULL;

	if (next && insn_func(next) == func)
		return next;

	/* Check if we're already in the subfunction: */
	if (func == func->cfunc)
		return NULL;

	/* Move to the subfunction: */
	return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}

static struct instruction *prev_insn_same_sec(struct objtool_file *file,
					      struct instruction *insn)
{
	if (insn->idx == 0) {
		if (insn->prev_len)
			return find_insn(file, insn->sec, insn->offset - insn->prev_len);
		return NULL;
	}

	return insn - 1;
}

static struct instruction *prev_insn_same_sym(struct objtool_file *file,
					      struct instruction *insn)
{
	struct instruction *prev = prev_insn_same_sec(file, insn);

	if (prev && insn_func(prev) == insn_func(insn))
		return prev;

	return NULL;
}

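/*
 * Note: the one-shot outer loop in for_each_insn() exists only to give
 * __sec a declaration scope; __fake forces exactly one pass.
 */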
#define for_each_insn(file, insn)					\
	for (struct section *__sec, *__fake = (struct section *)1;	\
	     __fake; __fake = NULL)					\
		for_each_sec(file, __sec)				\
			sec_for_each_insn(file, __sec, insn)

#define func_for_each_insn(file, func, insn)				\
	for (insn = find_insn(file, func->sec, func->offset);		\
	     insn;							\
	     insn = next_insn_same_func(file, insn))

#define sym_for_each_insn(file, sym, insn)				\
	for (insn = find_insn(file, sym->sec, sym->offset);		\
	     insn && insn->offset < sym->offset + sym->len;		\
	     insn = next_insn_same_sec(file, insn))

#define sym_for_each_insn_continue_reverse(file, sym, insn)		\
	for (insn = prev_insn_same_sec(file, insn);			\
	     insn && insn->offset >= sym->offset;			\
	     insn = prev_insn_same_sec(file, insn))

#define sec_for_each_insn_from(file, insn)				\
	for (; insn; insn = next_insn_same_sec(file, insn))

#define sec_for_each_insn_continue(file, insn)				\
	for (insn = next_insn_same_sec(file, insn); insn;		\
	     insn = next_insn_same_sec(file, insn))

static inline struct symbol *insn_call_dest(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return NULL;

	return insn->_call_dest;
}

static inline struct reloc *insn_jump_table(struct instruction *insn)
{
	if (insn->type == INSN_JUMP_DYNAMIC ||
	    insn->type == INSN_CALL_DYNAMIC)
		return insn->_jump_table;

	return NULL;
}

static bool is_jump_table_jump(struct instruction *insn)
{
	struct alt_group *alt_group = insn->alt_group;

	if (insn_jump_table(insn))
		return true;

	/* Retpoline alternative for a jump table? */
	return alt_group && alt_group->orig_group &&
	       insn_jump_table(alt_group->orig_group->first_insn);
}

static bool is_sibling_call(struct instruction *insn)
{
	/*
	 * Assume only STT_FUNC calls have jump-tables.
	 */
	if (insn_func(insn)) {
		/* An indirect jump is either a sibling call or a jump to a table. */
		if (insn->type == INSN_JUMP_DYNAMIC)
			return !is_jump_table_jump(insn);
	}

	/* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
	return (is_static_jump(insn) && insn_call_dest(insn));
}

/*
 * This checks to see if the given function is a "noreturn" function.
 *
 * For global functions which are outside the scope of this object file, we
 * have to keep a manual list of them.
 *
 * For local functions, we have to detect them manually by simply looking for
 * the lack of a return instruction.
 */
static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
				int recursion)
{
	int i;
	struct instruction *insn;
	bool empty = true;

	/*
	 * Unfortunately these have to be hard coded because the noreturn
	 * attribute isn't provided in ELF data. Keep 'em sorted.
	 */
	static const char * const global_noreturns[] = {
		"__invalid_creds",
		"__module_put_and_kthread_exit",
		"__reiserfs_panic",
		"__stack_chk_fail",
		"__ubsan_handle_builtin_unreachable",
		"arch_cpu_idle_dead",
		"cpu_bringup_and_idle",
		"cpu_startup_entry",
		"do_exit",
		"do_group_exit",
		"do_task_dead",
		"ex_handler_msr_mce",
		"fortify_panic",
		"kthread_complete_and_exit",
		"kthread_exit",
		"kunit_try_catch_throw",
		"lbug_with_loc",
		"machine_real_restart",
		"make_task_dead",
		"panic",
		"rewind_stack_and_make_dead",
		"sev_es_terminate",
		"snp_abort",
		"stop_this_cpu",
		"usercopy_abort",
		"xen_cpu_bringup_again",
		"xen_start_kernel",
	};

	if (!func)
		return false;

	if (func->bind == STB_WEAK)
		return false;

	if (func->bind == STB_GLOBAL)
		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
			if (!strcmp(func->name, global_noreturns[i]))
				return true;

	if (!func->len)
		return false;

	insn = find_insn(file, func->sec, func->offset);
	if (!insn || !insn_func(insn))
		return false;

	func_for_each_insn(file, func, insn) {
		empty = false;

		if (insn->type == INSN_RETURN)
			return false;
	}

	if (empty)
		return false;

	/*
	 * A function can have a sibling call instead of a return. In that
	 * case, the function's dead-end status depends on whether the target
	 * of the sibling call returns.
	 */
	func_for_each_insn(file, func, insn) {
		if (is_sibling_call(insn)) {
			struct instruction *dest = insn->jump_dest;

			if (!dest)
				/* sibling call to another file */
				return false;

			/* local sibling call */
			if (recursion == 5) {
				/*
				 * Infinite recursion: two functions have
				 * sibling calls to each other. This is a very
				 * rare case. It means they aren't dead ends.
				 */
				return false;
			}

			return __dead_end_function(file, insn_func(dest), recursion+1);
		}
	}

	return true;
}

static bool dead_end_function(struct objtool_file *file, struct symbol *func)
{
	return __dead_end_function(file, func, 0);
}

static void init_cfi_state(struct cfi_state *cfi)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		cfi->regs[i].base = CFI_UNDEFINED;
		cfi->vals[i].base = CFI_UNDEFINED;
	}
	cfi->cfa.base = CFI_UNDEFINED;
	cfi->drap_reg = CFI_UNDEFINED;
	cfi->drap_offset = -1;
}

static void init_insn_state(struct objtool_file *file, struct insn_state *state,
			    struct section *sec)
{
	memset(state, 0, sizeof(*state));
	init_cfi_state(&state->cfi);

	/*
	 * We need the full vmlinux for noinstr validation; otherwise we
	 * cannot correctly determine insn_call_dest(insn)->sec (external
	 * symbols do not have a section).
	 */
	if (opts.link && opts.noinstr && sec)
		state->noinstr = sec->noinstr;
}

static struct cfi_state *cfi_alloc(void)
{
	struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
	if (!cfi) {
		WARN("calloc failed");
		exit(1);
	}
	nr_cfi++;
	return cfi;
}

static int cfi_bits;
static struct hlist_head *cfi_hash;

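/*
 * The CFI hash table deduplicates cfi_state objects.  The hlist_node
 * ('hash') is the struct's first member, so both the comparison and the
 * hash key skip over it.
 */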
static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
{
	return memcmp((void *)cfi1 + sizeof(cfi1->hash),
		      (void *)cfi2 + sizeof(cfi2->hash),
		      sizeof(struct cfi_state) - sizeof(struct hlist_node));
}

static inline u32 cfi_key(struct cfi_state *cfi)
{
	return jhash((void *)cfi + sizeof(cfi->hash),
		     sizeof(*cfi) - sizeof(cfi->hash), 0);
}

static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
	struct cfi_state *obj;

	hlist_for_each_entry(obj, head, hash) {
		if (!cficmp(cfi, obj)) {
			nr_cfi_cache++;
			return obj;
		}
	}

	obj = cfi_alloc();
	*obj = *cfi;
	hlist_add_head(&obj->hash, head);

	return obj;
}

static void cfi_hash_add(struct cfi_state *cfi)
{
	struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];

	hlist_add_head(&cfi->hash, head);
}

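/*
 * Size the hash table off the expected entry count, with a floor of
 * 2^10 buckets; the table is mmap()ed as it can get large.
 */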
static void *cfi_hash_alloc(unsigned long size)
{
	cfi_bits = max(10, ilog2(size));
	cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
			PROT_READ|PROT_WRITE,
			MAP_PRIVATE|MAP_ANON, -1, 0);
	if (cfi_hash == (void *)-1L) {
		WARN("mmap fail cfi_hash");
		cfi_hash = NULL;
	} else if (opts.stats) {
		printf("cfi_bits: %d\n", cfi_bits);
	}

	return cfi_hash;
}

static unsigned long nr_insns;
static unsigned long nr_insns_visited;

/*
 * Call the arch-specific instruction decoder for all the instructions and add
 * them to the global instruction list.
 */
static int decode_instructions(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *func;
	unsigned long offset;
	struct instruction *insn;
	int ret;

	for_each_sec(file, sec) {
		struct instruction *insns = NULL;
		u8 prev_len = 0;
		u8 idx = 0;

		if (!(sec->sh.sh_flags & SHF_EXECINSTR))
			continue;

		if (strcmp(sec->name, ".altinstr_replacement") &&
		    strcmp(sec->name, ".altinstr_aux") &&
		    strncmp(sec->name, ".discard.", 9))
			sec->text = true;

		if (!strcmp(sec->name, ".noinstr.text") ||
		    !strcmp(sec->name, ".entry.text") ||
		    !strcmp(sec->name, ".cpuidle.text") ||
		    !strncmp(sec->name, ".text.__x86.", 12))
			sec->noinstr = true;

		/*
		 * .init.text code is run before userspace and thus doesn't
		 * strictly need retpolines, except for modules, which are
		 * loaded late; they very much do need retpolines in their
		 * .init.text.
		 */
		if (!strcmp(sec->name, ".init.text") && !opts.module)
			sec->init = true;

		for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
			if (!insns || idx == INSN_CHUNK_MAX) {
				insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
				if (!insns) {
					WARN("malloc failed");
					return -1;
				}
				idx = 0;
			} else {
				idx++;
			}
			insn = &insns[idx];
			insn->idx = idx;

			INIT_LIST_HEAD(&insn->call_node);
			insn->sec = sec;
			insn->offset = offset;
			insn->prev_len = prev_len;

			ret = arch_decode_instruction(file, sec, offset,
						      sec->sh.sh_size - offset,
						      insn);
			if (ret)
				return ret;

			prev_len = insn->len;

			/*
			 * By default, "ud2" is a dead end unless otherwise
			 * annotated, because GCC 7 inserts it for certain
			 * divide-by-zero cases.
			 */
			if (insn->type == INSN_BUG)
				insn->dead_end = true;

			hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
			nr_insns++;
		}

//		printf("%s: last chunk used: %d\n", sec->name, (int)idx);

		sec_for_each_sym(sec, func) {
			if (func->type != STT_NOTYPE && func->type != STT_FUNC)
				continue;

			if (func->offset == sec->sh.sh_size) {
				/* Heuristic: likely an "end" symbol */
				if (func->type == STT_NOTYPE)
					continue;
				WARN("%s(): STT_FUNC at end of section",
				     func->name);
				return -1;
			}

			if (func->return_thunk || func->alias != func)
				continue;

			if (!find_insn(file, sec, func->offset)) {
				WARN("%s(): can't find starting instruction",
				     func->name);
				return -1;
			}

			sym_for_each_insn(file, func, insn) {
				insn->sym = func;
				if (func->type == STT_FUNC &&
				    insn->type == INSN_ENDBR &&
				    list_empty(&insn->call_node)) {
					if (insn->offset == func->offset) {
						list_add_tail(&insn->call_node, &file->endbr_list);
						file->nr_endbr++;
					} else {
						file->nr_endbr_int++;
					}
				}
			}
		}
	}

	if (opts.stats)
		printf("nr_insns: %lu\n", nr_insns);

	return 0;
}

/*
 * Read the pv_ops[] .data table to find the statically-initialized values.
 */
static int add_pv_ops(struct objtool_file *file, const char *symname)
{
	struct symbol *sym, *func;
	unsigned long off, end;
	struct reloc *rel;
	int idx;

	sym = find_symbol_by_name(file->elf, symname);
	if (!sym)
		return 0;

	off = sym->offset;
	end = off + sym->len;
	for (;;) {
		rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
		if (!rel)
			break;

		func = rel->sym;
		if (func->type == STT_SECTION)
			func = find_symbol_by_offset(rel->sym->sec, rel->addend);

		idx = (rel->offset - sym->offset) / sizeof(unsigned long);

		objtool_pv_add(file, idx, func);

		off = rel->offset + 1;
		if (off > end)
			break;
	}

	return 0;
}

/*
 * Allocate and initialize file->pv_ops[].
 */
static int init_pv_ops(struct objtool_file *file)
{
	static const char *pv_ops_tables[] = {
		"pv_ops",
		"xen_cpu_ops",
		"xen_irq_ops",
		"xen_mmu_ops",
		NULL,
	};
	const char *pv_ops;
	struct symbol *sym;
	int idx, nr;

	if (!opts.noinstr)
		return 0;

	file->pv_ops = NULL;

	sym = find_symbol_by_name(file->elf, "pv_ops");
	if (!sym)
		return 0;

	nr = sym->len / sizeof(unsigned long);
	file->pv_ops = calloc(sizeof(struct pv_state), nr);
	if (!file->pv_ops)
		return -1;

	for (idx = 0; idx < nr; idx++)
		INIT_LIST_HEAD(&file->pv_ops[idx].targets);

	for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
		add_pv_ops(file, pv_ops);

	return 0;
}

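/*
 * Scan backward from the end of the section, at most 10 bytes, for the
 * start of the last decoded instruction.
 */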
static struct instruction *find_last_insn(struct objtool_file *file,
					  struct section *sec)
{
	struct instruction *insn = NULL;
	unsigned int offset;
	unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;

	for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
		insn = find_insn(file, sec, offset);

	return insn;
}

/*
 * Mark "ud2" instructions and manually annotated dead ends.
 */
static int add_dead_ends(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	/*
	 * Check for manually annotated dead ends.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.unreachable");
	if (!sec)
		goto reachable;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = prev_insn_same_sec(file, insn);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find unreachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find unreachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = true;
	}

reachable:
	/*
	 * These manually annotated reachable checks are needed for GCC 4.4,
	 * where the Linux unreachable() macro isn't supported. In that case
	 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's
	 * not a dead end.
	 */
	sec = find_section_by_name(file->elf, ".rela.discard.reachable");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}
		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (insn)
			insn = prev_insn_same_sec(file, insn);
		else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
			insn = find_last_insn(file, reloc->sym->sec);
			if (!insn) {
				WARN("can't find reachable insn at %s+0x%" PRIx64,
				     reloc->sym->sec->name, reloc->addend);
				return -1;
			}
		} else {
			WARN("can't find reachable insn at %s+0x%" PRIx64,
			     reloc->sym->sec->name, reloc->addend);
			return -1;
		}

		insn->dead_end = false;
	}

	return 0;
}

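/*
 * Each emitted struct static_call_site holds two 4-byte PC-relative
 * references: one to the call site ('addr') and one to the corresponding
 * static_call_key ('key'), with STATIC_CALL_SITE_TAIL encoded in the low
 * bits of the key reference for tail calls.
 */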
static int create_static_call_sections(struct objtool_file *file)
{
	struct section *sec;
	struct static_call_site *site;
	struct instruction *insn;
	struct symbol *key_sym;
	char *key_name, *tmp;
	int idx;

	sec = find_section_by_name(file->elf, ".static_call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->static_call_list);
		WARN("file already has .static_call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->static_call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
				 sizeof(struct static_call_site), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->static_call_list, call_node) {

		site = (struct static_call_site *)sec->data->d_buf + idx;
		memset(site, 0, sizeof(struct static_call_site));

		/* populate reloc for 'addr' */
		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(struct static_call_site),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		/* find key symbol */
		key_name = strdup(insn_call_dest(insn)->name);
		if (!key_name) {
			perror("strdup");
			return -1;
		}
		if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
			    STATIC_CALL_TRAMP_PREFIX_LEN)) {
			WARN("static_call: trampoline name malformed: %s", key_name);
			free(key_name);
			return -1;
		}
		tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
		memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);

		key_sym = find_symbol_by_name(file->elf, tmp);
		if (!key_sym) {
			if (!opts.module) {
				WARN("static_call: can't find static_call_key symbol: %s", tmp);
				free(key_name);
				return -1;
			}

			/*
			 * For modules, the key might not be exported, which
			 * means the module can make static calls but isn't
			 * allowed to change them.
			 *
			 * In that case we temporarily set the key to be the
			 * trampoline address. This is fixed up in
			 * static_call_add_module().
			 */
			key_sym = insn_call_dest(insn);
		}
		free(key_name);

		/* populate reloc for 'key' */
		if (elf_add_reloc(file->elf, sec,
				  idx * sizeof(struct static_call_site) + 4,
				  R_X86_64_PC32, key_sym,
				  is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
			return -1;

		idx++;
	}

	return 0;
}

static int create_retpoline_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".retpoline_sites");
	if (sec) {
		WARN("file already has .retpoline_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".retpoline_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .retpoline_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->retpoline_call_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .retpoline_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_return_sites_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".return_sites");
	if (sec) {
		WARN("file already has .return_sites, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node)
		idx++;

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".return_sites", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .return_sites");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->return_thunk_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		*site = 0;

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .return_sites");
			return -1;
		}

		idx++;
	}

	return 0;
}

static int create_ibt_endbr_seal_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
	if (sec) {
		WARN("file already has .ibt_endbr_seal, skipping");
		return 0;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node)
		idx++;

	if (opts.stats) {
		printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
		printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
		printf("ibt: superfluous ENDBR: %d\n", idx);
	}

	if (!idx)
		return 0;

	sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
				 sizeof(int), idx);
	if (!sec) {
		WARN("elf_create_section: .ibt_endbr_seal");
		return -1;
	}

	idx = 0;
	list_for_each_entry(insn, &file->endbr_list, call_node) {

		int *site = (int *)sec->data->d_buf + idx;
		struct symbol *sym = insn->sym;
		*site = 0;

		if (opts.module && sym && sym->type == STT_FUNC &&
		    insn->offset == sym->offset &&
		    (!strcmp(sym->name, "init_module") ||
		     !strcmp(sym->name, "cleanup_module")))
			WARN("%s(): not an indirect call target", sym->name);

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset)) {
			WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
			return -1;
		}

		idx++;
	}

	return 0;
}

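/*
 * Collect the locations of the __cfi_* preamble symbols (the kCFI
 * type-hash preambles) into a .cfi_sites section of PC-relative entries.
 */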
static int create_cfi_sections(struct objtool_file *file)
{
	struct section *sec;
	struct symbol *sym;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".cfi_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .cfi_sites section, skipping");
		return 0;
	}

	idx = 0;
	for_each_sym(file, sym) {
		if (sym->type != STT_FUNC)
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		idx++;
	}

	sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	idx = 0;
	for_each_sym(file, sym) {
		if (sym->type != STT_FUNC)
			continue;

		if (strncmp(sym->name, "__cfi_", 6))
			continue;

		loc = (unsigned int *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned int));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned int),
					  R_X86_64_PC32,
					  sym->sec, sym->offset))
			return -1;

		idx++;
	}

	return 0;
}

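/*
 * Unlike the PC-relative site tables above, __mcount_loc holds absolute
 * addresses (R_ABS32/R_ABS64), so entries are addrsize bytes wide and
 * 'idx' below counts bytes rather than entries.
 */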
static int create_mcount_loc_sections(struct objtool_file *file)
{
	int addrsize = elf_class_addrsize(file->elf);
	struct instruction *insn;
	struct section *sec;
	int idx;

	sec = find_section_by_name(file->elf, "__mcount_loc");
	if (sec) {
		INIT_LIST_HEAD(&file->mcount_loc_list);
		WARN("file already has __mcount_loc section, skipping");
		return 0;
	}

	if (list_empty(&file->mcount_loc_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
	if (!sec)
		return -1;

	sec->sh.sh_addralign = addrsize;

	idx = 0;
	list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
		void *loc;

		loc = sec->data->d_buf + idx;
		memset(loc, 0, addrsize);

		if (elf_add_reloc_to_insn(file->elf, sec, idx,
					  addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
					  insn->sec, insn->offset))
			return -1;

		idx += addrsize;
	}

	return 0;
}

static int create_direct_call_sections(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	unsigned int *loc;
	int idx;

	sec = find_section_by_name(file->elf, ".call_sites");
	if (sec) {
		INIT_LIST_HEAD(&file->call_list);
		WARN("file already has .call_sites section, skipping");
		return 0;
	}

	if (list_empty(&file->call_list))
		return 0;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node)
		idx++;

	sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
	if (!sec)
		return -1;

	idx = 0;
	list_for_each_entry(insn, &file->call_list, call_node) {

		loc = (unsigned int *)sec->data->d_buf + idx;
		memset(loc, 0, sizeof(unsigned int));

		if (elf_add_reloc_to_insn(file->elf, sec,
					  idx * sizeof(unsigned int),
					  R_X86_64_PC32,
					  insn->sec, insn->offset))
			return -1;

		idx++;
	}

	return 0;
}

/*
 * Warnings shouldn't be reported for ignored functions.
 */
static void add_ignores(struct objtool_file *file)
{
	struct instruction *insn;
	struct section *sec;
	struct symbol *func;
	struct reloc *reloc;

	sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
	if (!sec)
		return;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		switch (reloc->sym->type) {
		case STT_FUNC:
			func = reloc->sym;
			break;

		case STT_SECTION:
			func = find_func_by_offset(reloc->sym->sec, reloc->addend);
			if (!func)
				continue;
			break;

		default:
			WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
			continue;
		}

		func_for_each_insn(file, func, insn)
			insn->ignore = true;
	}
}

/*
 * This is a whitelist of functions that are allowed to be called with AC set.
 * The list is meant to be minimal and only contains compiler instrumentation
 * ABI and a few functions used to implement *_{to,from}_user() functions.
 *
 * These functions must not directly change AC, but may PUSHF/POPF.
 */
static const char *uaccess_safe_builtin[] = {
	/* KASAN */
	"kasan_report",
	"kasan_check_range",
	/* KASAN out-of-line */
	"__asan_loadN_noabort",
	"__asan_load1_noabort",
	"__asan_load2_noabort",
	"__asan_load4_noabort",
	"__asan_load8_noabort",
	"__asan_load16_noabort",
	"__asan_storeN_noabort",
	"__asan_store1_noabort",
	"__asan_store2_noabort",
	"__asan_store4_noabort",
	"__asan_store8_noabort",
	"__asan_store16_noabort",
	"__kasan_check_read",
	"__kasan_check_write",
	/* KASAN in-line */
	"__asan_report_load_n_noabort",
	"__asan_report_load1_noabort",
	"__asan_report_load2_noabort",
	"__asan_report_load4_noabort",
	"__asan_report_load8_noabort",
	"__asan_report_load16_noabort",
	"__asan_report_store_n_noabort",
	"__asan_report_store1_noabort",
	"__asan_report_store2_noabort",
	"__asan_report_store4_noabort",
	"__asan_report_store8_noabort",
	"__asan_report_store16_noabort",
	/* KCSAN */
	"__kcsan_check_access",
	"__kcsan_mb",
	"__kcsan_wmb",
	"__kcsan_rmb",
	"__kcsan_release",
	"kcsan_found_watchpoint",
	"kcsan_setup_watchpoint",
	"kcsan_check_scoped_accesses",
	"kcsan_disable_current",
	"kcsan_enable_current_nowarn",
	/* KCSAN/TSAN */
	"__tsan_func_entry",
	"__tsan_func_exit",
	"__tsan_read_range",
	"__tsan_write_range",
	"__tsan_read1",
	"__tsan_read2",
	"__tsan_read4",
	"__tsan_read8",
	"__tsan_read16",
	"__tsan_write1",
	"__tsan_write2",
	"__tsan_write4",
	"__tsan_write8",
	"__tsan_write16",
	"__tsan_read_write1",
	"__tsan_read_write2",
	"__tsan_read_write4",
	"__tsan_read_write8",
	"__tsan_read_write16",
	"__tsan_volatile_read1",
	"__tsan_volatile_read2",
	"__tsan_volatile_read4",
	"__tsan_volatile_read8",
	"__tsan_volatile_read16",
	"__tsan_volatile_write1",
	"__tsan_volatile_write2",
	"__tsan_volatile_write4",
	"__tsan_volatile_write8",
	"__tsan_volatile_write16",
	"__tsan_atomic8_load",
	"__tsan_atomic16_load",
	"__tsan_atomic32_load",
	"__tsan_atomic64_load",
	"__tsan_atomic8_store",
	"__tsan_atomic16_store",
	"__tsan_atomic32_store",
	"__tsan_atomic64_store",
	"__tsan_atomic8_exchange",
	"__tsan_atomic16_exchange",
	"__tsan_atomic32_exchange",
	"__tsan_atomic64_exchange",
	"__tsan_atomic8_fetch_add",
	"__tsan_atomic16_fetch_add",
	"__tsan_atomic32_fetch_add",
	"__tsan_atomic64_fetch_add",
	"__tsan_atomic8_fetch_sub",
	"__tsan_atomic16_fetch_sub",
	"__tsan_atomic32_fetch_sub",
	"__tsan_atomic64_fetch_sub",
	"__tsan_atomic8_fetch_and",
	"__tsan_atomic16_fetch_and",
	"__tsan_atomic32_fetch_and",
	"__tsan_atomic64_fetch_and",
	"__tsan_atomic8_fetch_or",
	"__tsan_atomic16_fetch_or",
	"__tsan_atomic32_fetch_or",
	"__tsan_atomic64_fetch_or",
	"__tsan_atomic8_fetch_xor",
	"__tsan_atomic16_fetch_xor",
	"__tsan_atomic32_fetch_xor",
	"__tsan_atomic64_fetch_xor",
	"__tsan_atomic8_fetch_nand",
	"__tsan_atomic16_fetch_nand",
	"__tsan_atomic32_fetch_nand",
	"__tsan_atomic64_fetch_nand",
	"__tsan_atomic8_compare_exchange_strong",
	"__tsan_atomic16_compare_exchange_strong",
	"__tsan_atomic32_compare_exchange_strong",
	"__tsan_atomic64_compare_exchange_strong",
	"__tsan_atomic8_compare_exchange_weak",
	"__tsan_atomic16_compare_exchange_weak",
	"__tsan_atomic32_compare_exchange_weak",
	"__tsan_atomic64_compare_exchange_weak",
	"__tsan_atomic8_compare_exchange_val",
	"__tsan_atomic16_compare_exchange_val",
	"__tsan_atomic32_compare_exchange_val",
	"__tsan_atomic64_compare_exchange_val",
	"__tsan_atomic_thread_fence",
	"__tsan_atomic_signal_fence",
	"__tsan_unaligned_read16",
	"__tsan_unaligned_write16",
	/* KCOV */
	"write_comp_data",
	"check_kcov_mode",
	"__sanitizer_cov_trace_pc",
	"__sanitizer_cov_trace_const_cmp1",
	"__sanitizer_cov_trace_const_cmp2",
	"__sanitizer_cov_trace_const_cmp4",
	"__sanitizer_cov_trace_const_cmp8",
	"__sanitizer_cov_trace_cmp1",
	"__sanitizer_cov_trace_cmp2",
	"__sanitizer_cov_trace_cmp4",
	"__sanitizer_cov_trace_cmp8",
	"__sanitizer_cov_trace_switch",
	/* KMSAN */
	"kmsan_copy_to_user",
	"kmsan_report",
	"kmsan_unpoison_entry_regs",
	"kmsan_unpoison_memory",
	"__msan_chain_origin",
	"__msan_get_context_state",
	"__msan_instrument_asm_store",
	"__msan_metadata_ptr_for_load_1",
	"__msan_metadata_ptr_for_load_2",
	"__msan_metadata_ptr_for_load_4",
	"__msan_metadata_ptr_for_load_8",
	"__msan_metadata_ptr_for_load_n",
	"__msan_metadata_ptr_for_store_1",
	"__msan_metadata_ptr_for_store_2",
	"__msan_metadata_ptr_for_store_4",
	"__msan_metadata_ptr_for_store_8",
	"__msan_metadata_ptr_for_store_n",
	"__msan_poison_alloca",
	"__msan_warning",
	/* UBSAN */
	"ubsan_type_mismatch_common",
	"__ubsan_handle_type_mismatch",
	"__ubsan_handle_type_mismatch_v1",
	"__ubsan_handle_shift_out_of_bounds",
	"__ubsan_handle_load_invalid_value",
	/* STACKLEAK */
	"stackleak_track_stack",
	/* misc */
	"csum_partial_copy_generic",
	"copy_mc_fragile",
	"copy_mc_fragile_handle_tail",
	"copy_mc_enhanced_fast_string",
	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
	"clear_user_erms",
	"clear_user_rep_good",
	"clear_user_original",
	NULL
};

static void add_uaccess_safe(struct objtool_file *file)
{
	struct symbol *func;
	const char **name;

	if (!opts.uaccess)
		return;

	for (name = uaccess_safe_builtin; *name; name++) {
		func = find_symbol_by_name(file->elf, *name);
		if (!func)
			continue;

		func->uaccess_safe = true;
	}
}

/*
 * FIXME: For now, just ignore any alternatives which add retpolines. This is
 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
 * But it at least allows objtool to understand the control flow *around* the
 * retpoline.
 */
static int add_ignore_alternatives(struct objtool_file *file)
{
	struct section *sec;
	struct reloc *reloc;
	struct instruction *insn;

	sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
	if (!sec)
		return 0;

	list_for_each_entry(reloc, &sec->reloc_list, list) {
		if (reloc->sym->type != STT_SECTION) {
			WARN("unexpected relocation symbol type in %s", sec->name);
			return -1;
		}

		insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!insn) {
			WARN("bad .discard.ignore_alts entry");
			return -1;
		}

		insn->ignore_alts = true;
	}

	return 0;
}

__weak bool arch_is_retpoline(struct symbol *sym)
{
	return false;
}

__weak bool arch_is_rethunk(struct symbol *sym)
{
	return false;
}

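/*
 * Find the relocation (if any) that applies to an instruction, caching
 * negative results in insn->no_reloc to avoid repeated searches.
 */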
static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
{
	struct reloc *reloc;

	if (insn->no_reloc)
		return NULL;

	if (!file)
		return NULL;

	reloc = find_reloc_by_dest_range(file->elf, insn->sec,
					 insn->offset, insn->len);
	if (!reloc) {
		insn->no_reloc = 1;
		return NULL;
	}

	return reloc;
}

static void remove_insn_ops(struct instruction *insn)
{
	struct stack_op *op, *next;

	for (op = insn->stack_ops; op; op = next) {
		next = op->next;
		free(op);
	}
	insn->stack_ops = NULL;
}

static void annotate_call_site(struct objtool_file *file,
			       struct instruction *insn, bool sibling)
{
	struct reloc *reloc = insn_reloc(file, insn);
	struct symbol *sym = insn_call_dest(insn);

	if (!sym)
		sym = reloc->sym;

	/*
	 * Alternative replacement code is just template code which is
	 * sometimes copied to the original instruction. For now, don't
	 * annotate it. (In the future we might consider annotating the
	 * original instruction if/when it ever makes sense to do so.)
	 */
	if (!strcmp(insn->sec->name, ".altinstr_replacement"))
		return;

	if (sym->static_call_tramp) {
		list_add_tail(&insn->call_node, &file->static_call_list);
		return;
	}

	if (sym->retpoline_thunk) {
		list_add_tail(&insn->call_node, &file->retpoline_call_list);
		return;
	}

	/*
	 * Many compilers cannot disable KCOV or sanitizer calls with a
	 * function attribute, so they need a little help: NOP out any
	 * such calls from noinstr text.
	 */
	if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}

		elf_write_insn(file->elf, insn->sec,
			       insn->offset, insn->len,
			       sibling ? arch_ret_insn(insn->len)
					: arch_nop_insn(insn->len));

		insn->type = sibling ? INSN_RETURN : INSN_NOP;

		if (sibling) {
			/*
			 * We've replaced the tail-call JMP insn with two new
			 * insns: RET; INT3, except we only have a single struct
			 * insn here. Mark it retpoline_safe to avoid the SLS
			 * warning, instead of adding another insn.
			 */
			insn->retpoline_safe = true;
		}

		return;
	}

	if (opts.mcount && sym->fentry) {
		if (sibling)
			WARN_INSN(insn, "tail call to __fentry__ !?!?");
		if (opts.mnop) {
			if (reloc) {
				reloc->type = R_NONE;
				elf_write_reloc(file->elf, reloc);
			}

			elf_write_insn(file->elf, insn->sec,
				       insn->offset, insn->len,
				       arch_nop_insn(insn->len));

			insn->type = INSN_NOP;
		}

		list_add_tail(&insn->call_node, &file->mcount_loc_list);
		return;
	}

	if (insn->type == INSN_CALL && !insn->sec->init)
		list_add_tail(&insn->call_node, &file->call_list);

	if (!sibling && dead_end_function(file, sym))
		insn->dead_end = true;
}

static void add_call_dest(struct objtool_file *file, struct instruction *insn,
			  struct symbol *dest, bool sibling)
{
	insn->_call_dest = dest;
	if (!dest)
		return;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, sibling);
}

static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
{
	/*
	 * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
	 * so convert them accordingly.
	 */
	switch (insn->type) {
	case INSN_CALL:
		insn->type = INSN_CALL_DYNAMIC;
		break;
	case INSN_JUMP_UNCONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC;
		break;
	case INSN_JUMP_CONDITIONAL:
		insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
		break;
	default:
		return;
	}

	insn->retpoline_safe = true;

	/*
	 * Whatever stack impact regular CALLs have, should be undone
	 * by the RETURN of the called function.
	 *
	 * Annotated intra-function calls retain the stack_ops but
	 * are converted to JUMP, see read_intra_function_calls().
	 */
	remove_insn_ops(insn);

	annotate_call_site(file, insn, false);
}

static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
{
	/*
	 * Return thunk tail calls are really just returns in disguise,
	 * so convert them accordingly.
	 */
	insn->type = INSN_RETURN;
	insn->retpoline_safe = true;

	if (add)
		list_add_tail(&insn->call_node, &file->return_thunk_list);
}

static bool is_first_func_insn(struct objtool_file *file,
			       struct instruction *insn, struct symbol *sym)
{
	if (insn->offset == sym->offset)
		return true;

	/* Allow direct CALL/JMP past ENDBR */
	if (opts.ibt) {
		struct instruction *prev = prev_insn_same_sym(file, insn);

		if (prev && prev->type == INSN_ENDBR &&
		    insn->offset == sym->offset + prev->len)
			return true;
	}

	return false;
}

/*
 * A sibling call is a tail-call to another symbol -- to differentiate from a
 * recursive tail-call which is to the same symbol.
 */
static bool jump_is_sibling_call(struct objtool_file *file,
				 struct instruction *from, struct instruction *to)
{
	struct symbol *fs = from->sym;
	struct symbol *ts = to->sym;

	/* Not a sibling call if from/to a symbol hole */
	if (!fs || !ts)
		return false;

	/* Not a sibling call if not targeting the start of a symbol. */
	if (!is_first_func_insn(file, to, ts))
		return false;

	/* Disallow sibling calls into STT_NOTYPE */
	if (ts->type == STT_NOTYPE)
		return false;

	/* Must not be self to be a sibling */
	return fs->pfunc != ts->pfunc;
}

/*
 * Find the destination instructions for all jumps.
 */
static int add_jump_destinations(struct objtool_file *file)
{
	struct instruction *insn, *jump_dest;
	struct reloc *reloc;
	struct section *dest_sec;
	unsigned long dest_off;

	for_each_insn(file, insn) {
		if (insn->jump_dest) {
			/*
			 * handle_group_alt() may have previously set
			 * 'jump_dest' for some alternatives.
			 */
			continue;
		}
		if (!is_static_jump(insn))
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_sec = insn->sec;
			dest_off = arch_jump_destination(insn);
		} else if (reloc->sym->type == STT_SECTION) {
			dest_sec = reloc->sym->sec;
			dest_off = arch_dest_reloc_offset(reloc->addend);
		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);
			continue;
		} else if (reloc->sym->return_thunk) {
			add_return_call(file, insn, true);
			continue;
		} else if (insn_func(insn)) {
			/*
			 * External sibling call or internal sibling call with
			 * STT_FUNC reloc.
			 */
			add_call_dest(file, insn, reloc->sym, true);
			continue;
		} else if (reloc->sym->sec->idx) {
			dest_sec = reloc->sym->sec;
			dest_off = reloc->sym->sym.st_value +
				   arch_dest_reloc_offset(reloc->addend);
		} else {
			/* non-func asm code jumping to another file */
			continue;
		}

		jump_dest = find_insn(file, dest_sec, dest_off);
		if (!jump_dest) {
			struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);

			/*
			 * This is a special case for zen_untrain_ret().
			 * It jumps to __x86_return_thunk(), but objtool
			 * can't find the thunk's starting RET
			 * instruction, because the RET is also in the
			 * middle of another instruction. Objtool only
			 * knows about the outer instruction.
			 */
			if (sym && sym->return_thunk) {
				add_return_call(file, insn, false);
				continue;
			}

			WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
				  dest_sec->name, dest_off);
			return -1;
		}

		/*
		 * Cross-function jump.
		 */
		if (insn_func(insn) && insn_func(jump_dest) &&
		    insn_func(insn) != insn_func(jump_dest)) {

			/*
			 * For GCC 8+, create parent/child links for any cold
			 * subfunctions. This is _mostly_ redundant with a
			 * similar initialization in read_symbols().
			 *
			 * If a function has aliases, we want the *first* such
			 * function in the symbol table to be the subfunction's
			 * parent. In that case we overwrite the
			 * initialization done in read_symbols().
			 *
			 * However this code can't completely replace the
			 * read_symbols() code because this doesn't detect the
			 * case where the parent function's only reference to a
			 * subfunction is through a jump table.
			 */
			if (!strstr(insn_func(insn)->name, ".cold") &&
			    strstr(insn_func(jump_dest)->name, ".cold")) {
				insn_func(insn)->cfunc = insn_func(jump_dest);
				insn_func(jump_dest)->pfunc = insn_func(insn);
			}
		}

		if (jump_is_sibling_call(file, insn, jump_dest)) {
			/*
			 * Internal sibling call without reloc or with
			 * STT_SECTION reloc.
			 */
			add_call_dest(file, insn, insn_func(jump_dest), true);
			continue;
		}

		insn->jump_dest = jump_dest;
	}

	return 0;
}

static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
{
	struct symbol *call_dest;

	call_dest = find_func_by_offset(sec, offset);
	if (!call_dest)
		call_dest = find_symbol_by_offset(sec, offset);

	return call_dest;
}

/*
 * Find the destination instructions for all calls.
 */
static int add_call_destinations(struct objtool_file *file)
{
	struct instruction *insn;
	unsigned long dest_off;
	struct symbol *dest;
	struct reloc *reloc;

	for_each_insn(file, insn) {
		if (insn->type != INSN_CALL)
			continue;

		reloc = insn_reloc(file, insn);
		if (!reloc) {
			dest_off = arch_jump_destination(insn);
			dest = find_call_destination(insn->sec, dest_off);

			add_call_dest(file, insn, dest, false);

			if (insn->ignore)
				continue;

			if (!insn_call_dest(insn)) {
				WARN_INSN(insn, "unannotated intra-function call");
				return -1;
			}

			if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
				WARN_INSN(insn, "unsupported call to non-function");
				return -1;
			}

		} else if (reloc->sym->type == STT_SECTION) {
			dest_off = arch_dest_reloc_offset(reloc->addend);
			dest = find_call_destination(reloc->sym->sec, dest_off);
			if (!dest) {
				WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
					  reloc->sym->sec->name, dest_off);
				return -1;
			}

			add_call_dest(file, insn, dest, false);

		} else if (reloc->sym->retpoline_thunk) {
			add_retpoline_call(file, insn);

		} else
			add_call_dest(file, insn, reloc->sym, false);
	}

	return 0;
}

/*
 * The .alternatives section requires some extra special care over and above
 * other special sections because alternatives are patched in place.
 */
static int handle_group_alt(struct objtool_file *file,
			    struct special_alt *special_alt,
			    struct instruction *orig_insn,
			    struct instruction **new_insn)
{
	struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
	struct alt_group *orig_alt_group, *new_alt_group;
	unsigned long dest_off;

	orig_alt_group = orig_insn->alt_group;
	if (!orig_alt_group) {
		struct instruction *last_orig_insn = NULL;

		orig_alt_group = malloc(sizeof(*orig_alt_group));
		if (!orig_alt_group) {
			WARN("malloc failed");
			return -1;
		}
		orig_alt_group->cfi = calloc(special_alt->orig_len,
					     sizeof(struct cfi_state *));
		if (!orig_alt_group->cfi) {
			WARN("calloc failed");
			return -1;
		}

		insn = orig_insn;
		sec_for_each_insn_from(file, insn) {
			if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
				break;

			insn->alt_group = orig_alt_group;
			last_orig_insn = insn;
		}
		orig_alt_group->orig_group = NULL;
		orig_alt_group->first_insn = orig_insn;
		orig_alt_group->last_insn = last_orig_insn;
		orig_alt_group->nop = NULL;
	} else {
		if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
		    orig_alt_group->first_insn->offset != special_alt->orig_len) {
			WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
				  orig_alt_group->last_insn->offset +
				  orig_alt_group->last_insn->len -
				  orig_alt_group->first_insn->offset,
				  special_alt->orig_len);
			return -1;
		}
	}

	new_alt_group = malloc(sizeof(*new_alt_group));
	if (!new_alt_group) {
		WARN("malloc failed");
		return -1;
	}

	if (special_alt->new_len < special_alt->orig_len) {
		/*
		 * Insert a fake nop at the end to make the replacement
		 * alt_group the same size as the original. This is needed to
		 * allow propagate_alt_cfi() to do its magic. When the last
		 * instruction affects the stack, the instruction after it (the
		 * nop) will propagate the new state to the shared CFI array.
		 */
		nop = malloc(sizeof(*nop));
		if (!nop) {
			WARN("malloc failed");
			return -1;
		}
		memset(nop, 0, sizeof(*nop));

		nop->sec = special_alt->new_sec;
		nop->offset = special_alt->new_off + special_alt->new_len;
		nop->len = special_alt->orig_len - special_alt->new_len;
		nop->type = INSN_NOP;
		nop->sym = orig_insn->sym;
		nop->alt_group = new_alt_group;
		nop->ignore = orig_insn->ignore_alts;
	}

	if (!special_alt->new_len) {
		*new_insn = nop;
		goto end;
	}

	insn = *new_insn;
	sec_for_each_insn_from(file, insn) {
		struct reloc *alt_reloc;

		if (insn->offset >= special_alt->new_off + special_alt->new_len)
			break;

		last_new_insn = insn;

		insn->ignore = orig_insn->ignore_alts;
		insn->sym = orig_insn->sym;
		insn->alt_group = new_alt_group;

		/*
		 * Since alternative replacement code is copy/pasted by the
		 * kernel after applying relocations, generally such code can't
		 * have relative-address relocation references to outside the
		 * .altinstr_replacement section, unless the arch's
		 * alternatives code can adjust the relative offsets
		 * accordingly.
		 */
		alt_reloc = insn_reloc(file, insn);
		if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
		    !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {

			WARN_INSN(insn, "unsupported relocation in alternatives section");
			return -1;
		}

		if (!is_static_jump(insn))
			continue;

		if (!insn->immediate)
			continue;

		dest_off = arch_jump_destination(insn);
		if (dest_off == special_alt->new_off + special_alt->new_len) {
			insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
			if (!insn->jump_dest) {
				WARN_INSN(insn, "can't find alternative jump destination");
				return -1;
			}
		}
	}

	if (!last_new_insn) {
		WARN_FUNC("can't find last new alternative instruction",
			  special_alt->new_sec, special_alt->new_off);
		return -1;
	}

end:
	new_alt_group->orig_group = orig_alt_group;
	new_alt_group->first_insn = *new_insn;
	new_alt_group->last_insn = last_new_insn;
	new_alt_group->nop = nop;
	new_alt_group->cfi = orig_alt_group->cfi;
	return 0;
}

/*
 * A jump label (__jump_table) entry can either convert a nop to a jump or a
 * jump to a nop.  If the original instruction is a jump, make the alt entry
 * an effective nop by just skipping the original instruction.
 */
static int handle_jump_alt(struct objtool_file *file,
			   struct special_alt *special_alt,
			   struct instruction *orig_insn,
			   struct instruction **new_insn)
{
	if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
	    orig_insn->type != INSN_NOP) {

		WARN_INSN(orig_insn, "unsupported instruction at jump label");
		return -1;
	}

	if (opts.hack_jump_label && special_alt->key_addend & 2) {
		struct reloc *reloc = insn_reloc(file, orig_insn);

		if (reloc) {
			reloc->type = R_NONE;
			elf_write_reloc(file->elf, reloc);
		}
		elf_write_insn(file->elf, orig_insn->sec,
			       orig_insn->offset, orig_insn->len,
			       arch_nop_insn(orig_insn->len));
		orig_insn->type = INSN_NOP;
	}

	if (orig_insn->type == INSN_NOP) {
		if (orig_insn->len == 2)
			file->jl_nop_short++;
		else
			file->jl_nop_long++;

		return 0;
	}

	if (orig_insn->len == 2)
		file->jl_short++;
	else
		file->jl_long++;

	*new_insn = next_insn_same_sec(file, orig_insn);
	return 0;
}

/*
 * Read all the special sections which have alternate instructions which can be
 * patched in or redirected to at runtime. Each instruction having alternate
 * instruction(s) has them added to its insn->alts list, which will be
 * traversed in validate_branch().
 */
static int add_special_section_alts(struct objtool_file *file)
{
	struct list_head special_alts;
	struct instruction *orig_insn, *new_insn;
	struct special_alt *special_alt, *tmp;
	struct alternative *alt;
	int ret;

	ret = special_get_alts(file->elf, &special_alts);
	if (ret)
		return ret;

	list_for_each_entry_safe(special_alt, tmp, &special_alts, list) {

		orig_insn = find_insn(file, special_alt->orig_sec,
				      special_alt->orig_off);
		if (!orig_insn) {
			WARN_FUNC("special: can't find orig instruction",
				  special_alt->orig_sec, special_alt->orig_off);
			ret = -1;
			goto out;
		}

		new_insn = NULL;
		if (!special_alt->group || special_alt->new_len) {
			new_insn = find_insn(file, special_alt->new_sec,
					     special_alt->new_off);
			if (!new_insn) {
				WARN_FUNC("special: can't find new instruction",
					  special_alt->new_sec,
					  special_alt->new_off);
				ret = -1;
				goto out;
			}
		}

		if (special_alt->group) {
			if (!special_alt->orig_len) {
				WARN_INSN(orig_insn, "empty alternative entry");
				continue;
			}

			ret = handle_group_alt(file, special_alt, orig_insn,
					       &new_insn);
			if (ret)
				goto out;
		} else if (special_alt->jump_or_nop) {
			ret = handle_jump_alt(file, special_alt, orig_insn,
					      &new_insn);
			if (ret)
				goto out;
		}

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			ret = -1;
			goto out;
		}

		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
		orig_insn->ignore_alts |= special_alt->skip_alt;
		alt->next = orig_insn->alts;
		orig_insn->alts = alt;

		list_del(&special_alt->list);
		free(special_alt);
	}

	if (opts.stats) {
		printf("jl\\\tNOP\tJMP\n");
		printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
		printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
	}

out:
	return ret;
}

static int add_jump_table(struct objtool_file *file, struct instruction *insn,
			  struct reloc *table)
{
	struct reloc *reloc = table;
	struct instruction *dest_insn;
	struct alternative *alt;
	struct symbol *pfunc = insn_func(insn)->pfunc;
	unsigned int prev_offset = 0;

	/*
	 * Each @reloc is a switch table relocation which points to the target
	 * instruction.
	 */
	list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {

		/* Check for the end of the table: */
		if (reloc != table && reloc->jump_table_start)
			break;

		/* Make sure the table entries are consecutive: */
		if (prev_offset && reloc->offset != prev_offset + 8)
			break;

		/* Detect function pointers from contiguous objects: */
		if (reloc->sym->sec == pfunc->sec &&
		    reloc->addend == pfunc->offset)
			break;

		dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
		if (!dest_insn)
			break;

		/* Make sure the destination is in the same function: */
		if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
			break;

		alt = malloc(sizeof(*alt));
		if (!alt) {
			WARN("malloc failed");
			return -1;
		}

		alt->insn = dest_insn;
		alt->next = insn->alts;
		insn->alts = alt;
		prev_offset = reloc->offset;
	}

	if (!prev_offset) {
		WARN_INSN(insn, "can't find switch jump table");
		return -1;
	}

	return 0;
}

/*
 * find_jump_table() - Given a dynamic jump, find the switch jump table
 * associated with it.
 */
static struct reloc *find_jump_table(struct objtool_file *file,
				     struct symbol *func,
				     struct instruction *insn)
{
	struct reloc *table_reloc;
	struct instruction *dest_insn, *orig_insn = insn;

2104 /*
2105 * Backward search using the @first_jump_src links; these skip over
2106 * much of the 'in between' code, which could otherwise confuse the
2107 * search.
2108 */
2109 for (;
2110 insn && insn_func(insn) && insn_func(insn)->pfunc == func;
2111 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
2112
2113 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
2114 break;
2115
2116 /* allow small jumps within the range */
2117 if (insn->type == INSN_JUMP_UNCONDITIONAL &&
2118 insn->jump_dest &&
2119 (insn->jump_dest->offset <= insn->offset ||
2120 insn->jump_dest->offset > orig_insn->offset))
2121 break;
2122
2123 table_reloc = arch_find_switch_table(file, insn);
2124 if (!table_reloc)
2125 continue;
2126 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
2127 if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
2128 continue;
2129
2130 return table_reloc;
2131 }
2132
2133 return NULL;
2134 }
2135
2136 /*
2137 * First pass: Mark the head of each jump table so that in the next pass,
2138 * we know when a given jump table ends and the next one starts.
2139 */
2140 static void mark_func_jump_tables(struct objtool_file *file,
2141 struct symbol *func)
2142 {
2143 struct instruction *insn, *last = NULL;
2144 struct reloc *reloc;
2145
2146 func_for_each_insn(file, func, insn) {
2147 if (!last)
2148 last = insn;
2149
2150 /*
2151 * Store back-pointers for unconditional forward jumps such
2152 * that find_jump_table() can back-track using those and
2153 * avoid some potentially confusing code.
2154 */
2155 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest &&
2156 insn->offset > last->offset &&
2157 insn->jump_dest->offset > insn->offset &&
2158 !insn->jump_dest->first_jump_src) {
2159
2160 insn->jump_dest->first_jump_src = insn;
2161 last = insn->jump_dest;
2162 }
2163
2164 if (insn->type != INSN_JUMP_DYNAMIC)
2165 continue;
2166
2167 reloc = find_jump_table(file, func, insn);
2168 if (reloc) {
2169 reloc->jump_table_start = true;
2170 insn->_jump_table = reloc;
2171 }
2172 }
2173 }
2174
2175 static int add_func_jump_tables(struct objtool_file *file,
2176 struct symbol *func)
2177 {
2178 struct instruction *insn;
2179 int ret;
2180
2181 func_for_each_insn(file, func, insn) {
2182 if (!insn_jump_table(insn))
2183 continue;
2184
2185 ret = add_jump_table(file, insn, insn_jump_table(insn));
2186 if (ret)
2187 return ret;
2188 }
2189
2190 return 0;
2191 }
2192
2193 /*
2194 * For some switch statements, gcc generates a jump table in the .rodata
2195 * section which contains a list of addresses within the function to jump to.
2196 * This finds these jump tables and adds them to the insn->alts lists.
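 *
 * E.g. (illustrative only) a dense switch such as:
 *
 *	switch (cmd) {
 *	case 0: return f0();
 *	case 1: return f1();
 *	case 2: return f2();
 *	case 3: return f3();
 *	}
 *
 * may compile to "jmp *table(,%rax,8)" plus a .rodata array of the four
 * case addresses.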
2197 */
2198 static int add_jump_table_alts(struct objtool_file *file)
2199 {
2200 struct symbol *func;
2201 int ret;
2202
2203 if (!file->rodata)
2204 return 0;
2205
2206 for_each_sym(file, func) {
2207 if (func->type != STT_FUNC)
2208 continue;
2209
2210 mark_func_jump_tables(file, func);
2211 ret = add_func_jump_tables(file, func);
2212 if (ret)
2213 return ret;
2214 }
2215
2216 return 0;
2217 }
2218
2219 static void set_func_state(struct cfi_state *state)
2220 {
2221 state->cfa = initial_func_cfi.cfa;
2222 memcpy(&state->regs, &initial_func_cfi.regs,
2223 CFI_NUM_REGS * sizeof(struct cfi_reg));
2224 state->stack_size = initial_func_cfi.cfa.offset;
2225 state->type = UNWIND_HINT_TYPE_CALL;
2226 }
2227
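/*
 * Read the .discard.unwind_hints section generated by the UNWIND_HINT_*()
 * asm annotations and attach the requested CFI state to each annotated
 * instruction.
 */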
2228 static int read_unwind_hints(struct objtool_file *file)
2229 {
2230 struct cfi_state cfi = init_cfi;
2231 struct section *sec, *relocsec;
2232 struct unwind_hint *hint;
2233 struct instruction *insn;
2234 struct reloc *reloc;
2235 int i;
2236
2237 sec = find_section_by_name(file->elf, ".discard.unwind_hints");
2238 if (!sec)
2239 return 0;
2240
2241 relocsec = sec->reloc;
2242 if (!relocsec) {
2243 WARN("missing .rela.discard.unwind_hints section");
2244 return -1;
2245 }
2246
2247 if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
2248 WARN("struct unwind_hint size mismatch");
2249 return -1;
2250 }
2251
2252 file->hints = true;
2253
2254 for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
2255 hint = (struct unwind_hint *)sec->data->d_buf + i;
2256
2257 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
2258 if (!reloc) {
2259 WARN("can't find reloc for unwind_hints[%d]", i);
2260 return -1;
2261 }
2262
2263 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2264 if (!insn) {
2265 WARN("can't find insn for unwind_hints[%d]", i);
2266 return -1;
2267 }
2268
2269 insn->hint = true;
2270
2271 if (hint->type == UNWIND_HINT_TYPE_SAVE) {
2272 insn->hint = false;
2273 insn->save = true;
2274 continue;
2275 }
2276
2277 if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
2278 insn->restore = true;
2279 continue;
2280 }
2281
2282 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
2283 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
2284
2285 if (sym && sym->bind == STB_GLOBAL) {
2286 if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
2287 WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
2288 }
2289 }
2290 }
2291
2292 if (hint->type == UNWIND_HINT_TYPE_FUNC) {
2293 insn->cfi = &func_cfi;
2294 continue;
2295 }
2296
2297 if (insn->cfi)
2298 cfi = *(insn->cfi);
2299
2300 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
2301 WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
2302 return -1;
2303 }
2304
2305 cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
2306 cfi.type = hint->type;
2307 cfi.signal = hint->signal;
2308
2309 insn->cfi = cfi_hash_find_or_add(&cfi);
2310 }
2311
2312 return 0;
2313 }
2314
2315 static int read_noendbr_hints(struct objtool_file *file)
2316 {
2317 struct section *sec;
2318 struct instruction *insn;
2319 struct reloc *reloc;
2320
2321 sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
2322 if (!sec)
2323 return 0;
2324
2325 list_for_each_entry(reloc, &sec->reloc_list, list) {
2326 insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
2327 if (!insn) {
2328 WARN("bad .discard.noendbr entry");
2329 return -1;
2330 }
2331
2332 insn->noendbr = 1;
2333 }
2334
2335 return 0;
2336 }
2337
2338 static int read_retpoline_hints(struct objtool_file *file)
2339 {
2340 struct section *sec;
2341 struct instruction *insn;
2342 struct reloc *reloc;
2343
2344 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
2345 if (!sec)
2346 return 0;
2347
2348 list_for_each_entry(reloc, &sec->reloc_list, list) {
2349 if (reloc->sym->type != STT_SECTION) {
2350 WARN("unexpected relocation symbol type in %s", sec->name);
2351 return -1;
2352 }
2353
2354 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2355 if (!insn) {
2356 WARN("bad .discard.retpoline_safe entry");
2357 return -1;
2358 }
2359
2360 if (insn->type != INSN_JUMP_DYNAMIC &&
2361 insn->type != INSN_CALL_DYNAMIC &&
2362 insn->type != INSN_RETURN &&
2363 insn->type != INSN_NOP) {
2364 WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
2365 return -1;
2366 }
2367
2368 insn->retpoline_safe = true;
2369 }
2370
2371 return 0;
2372 }
2373
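/*
 * instr_begin()/instr_end() annotations bracket code within a noinstr
 * region where instrumentation is explicitly allowed; record them as a
 * per-instruction +1/-1 delta for validate_branch() to accumulate.
 */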
2374 static int read_instr_hints(struct objtool_file *file)
2375 {
2376 struct section *sec;
2377 struct instruction *insn;
2378 struct reloc *reloc;
2379
2380 sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
2381 if (!sec)
2382 return 0;
2383
2384 list_for_each_entry(reloc, &sec->reloc_list, list) {
2385 if (reloc->sym->type != STT_SECTION) {
2386 WARN("unexpected relocation symbol type in %s", sec->name);
2387 return -1;
2388 }
2389
2390 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2391 if (!insn) {
2392 WARN("bad .discard.instr_end entry");
2393 return -1;
2394 }
2395
2396 insn->instr--;
2397 }
2398
2399 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
2400 if (!sec)
2401 return 0;
2402
2403 list_for_each_entry(reloc, &sec->reloc_list, list) {
2404 if (reloc->sym->type != STT_SECTION) {
2405 WARN("unexpected relocation symbol type in %s", sec->name);
2406 return -1;
2407 }
2408
2409 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2410 if (!insn) {
2411 WARN("bad .discard.instr_begin entry");
2412 return -1;
2413 }
2414
2415 insn->instr++;
2416 }
2417
2418 return 0;
2419 }
2420
2421 static int read_validate_unret_hints(struct objtool_file *file)
2422 {
2423 struct section *sec;
2424 struct instruction *insn;
2425 struct reloc *reloc;
2426
2427 sec = find_section_by_name(file->elf, ".rela.discard.validate_unret");
2428 if (!sec)
2429 return 0;
2430
2431 list_for_each_entry(reloc, &sec->reloc_list, list) {
2432 if (reloc->sym->type != STT_SECTION) {
2433 WARN("unexpected relocation symbol type in %s", sec->name);
2434 return -1;
2435 }
2436
2437 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2438 if (!insn) {
2439 WARN("bad .discard.instr_end entry");
2440 return -1;
2441 }
2442 insn->unret = 1;
2443 }
2444
2445 return 0;
2446 }
2447
2448
2449 static int read_intra_function_calls(struct objtool_file *file)
2450 {
2451 struct instruction *insn;
2452 struct section *sec;
2453 struct reloc *reloc;
2454
2455 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
2456 if (!sec)
2457 return 0;
2458
2459 list_for_each_entry(reloc, &sec->reloc_list, list) {
2460 unsigned long dest_off;
2461
2462 if (reloc->sym->type != STT_SECTION) {
2463 WARN("unexpected relocation symbol type in %s",
2464 sec->name);
2465 return -1;
2466 }
2467
2468 insn = find_insn(file, reloc->sym->sec, reloc->addend);
2469 if (!insn) {
2470 WARN("bad .discard.intra_function_call entry");
2471 return -1;
2472 }
2473
2474 if (insn->type != INSN_CALL) {
2475 WARN_INSN(insn, "intra_function_call not a direct call");
2476 return -1;
2477 }
2478
2479 /*
2480 * Treat intra-function CALLs as JMPs, but with a stack_op.
2481 * See add_call_destinations(), which strips stack_ops from
2482 * normal CALLs.
2483 */
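			/*
			 * A sketch of the pattern (asm labels invented):
			 *
			 *	ANNOTATE_INTRA_FUNCTION_CALL
			 *	call	1f	# pushes return address
			 * 1:	pop	%reg	# consumes it; no RET pairs with the call
			 */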
2484 insn->type = INSN_JUMP_UNCONDITIONAL;
2485
2486 dest_off = arch_jump_destination(insn);
2487 insn->jump_dest = find_insn(file, insn->sec, dest_off);
2488 if (!insn->jump_dest) {
2489 WARN_INSN(insn, "can't find call dest at %s+0x%lx",
2490 insn->sec->name, dest_off);
2491 return -1;
2492 }
2493 }
2494
2495 return 0;
2496 }
2497
2498 /*
2499 * Return true if name matches an instrumentation function, where calls to that
2500 * function from noinstr code can safely be removed, but compilers won't do so.
2501 */
2502 static bool is_profiling_func(const char *name)
2503 {
2504 /*
2505 * Many compilers cannot disable KCOV with a function attribute.
2506 */
2507 if (!strncmp(name, "__sanitizer_cov_", 16))
2508 return true;
2509
2510 /*
2511 * Some compilers currently do not remove __tsan_func_entry/exit nor
2512 * __tsan_atomic_signal_fence (used for barrier instrumentation) with
2513 * the __no_sanitize_thread attribute, so remove them here. Once the
2514 * kernel's minimum Clang version is 14.0, this workaround can be removed.
2515 */
2516 if (!strncmp(name, "__tsan_func_", 12) ||
2517 !strcmp(name, "__tsan_atomic_signal_fence"))
2518 return true;
2519
2520 return false;
2521 }
2522
2523 static int classify_symbols(struct objtool_file *file)
2524 {
2525 struct symbol *func;
2526
2527 for_each_sym(file, func) {
2528 if (func->bind != STB_GLOBAL)
2529 continue;
2530
2531 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
2532 strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
2533 func->static_call_tramp = true;
2534
2535 if (arch_is_retpoline(func))
2536 func->retpoline_thunk = true;
2537
2538 if (arch_is_rethunk(func))
2539 func->return_thunk = true;
2540
2541 if (arch_ftrace_match(func->name))
2542 func->fentry = true;
2543
2544 if (is_profiling_func(func->name))
2545 func->profiling_func = true;
2546 }
2547
2548 return 0;
2549 }
2550
2551 static void mark_rodata(struct objtool_file *file)
2552 {
2553 struct section *sec;
2554 bool found = false;
2555
2556 /*
2557 * Search for the following rodata sections, each of which can
2558 * potentially contain jump tables:
2559 *
2560 * - .rodata: can contain GCC switch tables
2561 * - .rodata.<func>: same, if -fdata-sections is being used
2562 * - .rodata..c_jump_table: contains C annotated jump tables
2563 *
2564 * .rodata.str1.* sections are ignored; they don't contain jump tables.
2565 */
2566 for_each_sec(file, sec) {
2567 if (!strncmp(sec->name, ".rodata", 7) &&
2568 !strstr(sec->name, ".str1.")) {
2569 sec->rodata = true;
2570 found = true;
2571 }
2572 }
2573
2574 file->rodata = found;
2575 }
2576
2577 static int decode_sections(struct objtool_file *file)
2578 {
2579 int ret;
2580
2581 mark_rodata(file);
2582
2583 ret = init_pv_ops(file);
2584 if (ret)
2585 return ret;
2586
2587 /*
2588 * Must be before add_{jump,call}_destinations().
2589 */
2590 ret = classify_symbols(file);
2591 if (ret)
2592 return ret;
2593
2594 ret = decode_instructions(file);
2595 if (ret)
2596 return ret;
2597
2598 add_ignores(file);
2599 add_uaccess_safe(file);
2600
2601 ret = add_ignore_alternatives(file);
2602 if (ret)
2603 return ret;
2604
2605 /*
2606 * Must be before read_unwind_hints() since that needs insn->noendbr.
2607 */
2608 ret = read_noendbr_hints(file);
2609 if (ret)
2610 return ret;
2611
2612 /*
2613 * Must be before add_jump_destinations(), which depends on 'func'
2614 * being set for alternatives, to enable proper sibling call detection.
2615 */
2616 if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
2617 ret = add_special_section_alts(file);
2618 if (ret)
2619 return ret;
2620 }
2621
2622 ret = add_jump_destinations(file);
2623 if (ret)
2624 return ret;
2625
2626 /*
2627 * Must be before add_call_destinations(); it changes INSN_CALL to
2628 * INSN_JUMP_UNCONDITIONAL.
2629 */
2630 ret = read_intra_function_calls(file);
2631 if (ret)
2632 return ret;
2633
2634 ret = add_call_destinations(file);
2635 if (ret)
2636 return ret;
2637
2638 /*
2639 * Must be after add_call_destinations() such that it can override
2640 * dead_end_function() marks.
2641 */
2642 ret = add_dead_ends(file);
2643 if (ret)
2644 return ret;
2645
2646 ret = add_jump_table_alts(file);
2647 if (ret)
2648 return ret;
2649
2650 ret = read_unwind_hints(file);
2651 if (ret)
2652 return ret;
2653
2654 ret = read_retpoline_hints(file);
2655 if (ret)
2656 return ret;
2657
2658 ret = read_instr_hints(file);
2659 if (ret)
2660 return ret;
2661
2662 ret = read_validate_unret_hints(file);
2663 if (ret)
2664 return ret;
2665
2666 return 0;
2667 }
2668
2669 static bool is_fentry_call(struct instruction *insn)
2670 {
2671 if (insn->type == INSN_CALL &&
2672 insn_call_dest(insn) &&
2673 insn_call_dest(insn)->fentry)
2674 return true;
2675
2676 return false;
2677 }
2678
2679 static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
2680 {
2681 struct cfi_state *cfi = &state->cfi;
2682 int i;
2683
2684 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
2685 return true;
2686
2687 if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
2688 return true;
2689
2690 if (cfi->stack_size != initial_func_cfi.cfa.offset)
2691 return true;
2692
2693 for (i = 0; i < CFI_NUM_REGS; i++) {
2694 if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
2695 cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
2696 return true;
2697 }
2698
2699 return false;
2700 }
2701
2702 static bool check_reg_frame_pos(const struct cfi_reg *reg,
2703 int expected_offset)
2704 {
2705 return reg->base == CFI_CFA &&
2706 reg->offset == expected_offset;
2707 }
2708
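/*
 * A valid frame is the canonical prologue state, roughly:
 *
 *	push	%rbp		# RA at CFA-8, saved RBP at CFA-16
 *	mov	%rsp, %rbp	# CFA now tracked via RBP
 *
 * or, with DRAP, a BP-based frame.
 */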
2709 static bool has_valid_stack_frame(struct insn_state *state)
2710 {
2711 struct cfi_state *cfi = &state->cfi;
2712
2713 if (cfi->cfa.base == CFI_BP &&
2714 check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
2715 check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
2716 return true;
2717
2718 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
2719 return true;
2720
2721 return false;
2722 }
2723
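/*
 * For UNWIND_HINT_TYPE_REGS{,_PARTIAL} regions a pt_regs frame is on the
 * stack, so only the CFA offset needs tracking here; the saved registers
 * are recovered from pt_regs itself.
 */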
2724 static int update_cfi_state_regs(struct instruction *insn,
2725 struct cfi_state *cfi,
2726 struct stack_op *op)
2727 {
2728 struct cfi_reg *cfa = &cfi->cfa;
2729
2730 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
2731 return 0;
2732
2733 /* push */
2734 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
2735 cfa->offset += 8;
2736
2737 /* pop */
2738 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
2739 cfa->offset -= 8;
2740
2741 /* add immediate to sp */
2742 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD &&
2743 op->dest.reg == CFI_SP && op->src.reg == CFI_SP)
2744 cfa->offset -= op->src.offset;
2745
2746 return 0;
2747 }
2748
2749 static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
2750 {
2751 if (arch_callee_saved_reg(reg) &&
2752 cfi->regs[reg].base == CFI_UNDEFINED) {
2753 cfi->regs[reg].base = base;
2754 cfi->regs[reg].offset = offset;
2755 }
2756 }
2757
2758 static void restore_reg(struct cfi_state *cfi, unsigned char reg)
2759 {
2760 cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
2761 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
2762 }
2763
2764 /*
2765 * A note about DRAP stack alignment:
2766 *
2767 * GCC has the concept of a DRAP register, which is used to help keep track of
2768 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP
2769 * register. The typical DRAP pattern is:
2770 *
2771 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10
2772 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp
2773 * 41 ff 72 f8 pushq -0x8(%r10)
2774 * 55 push %rbp
2775 * 48 89 e5 mov %rsp,%rbp
2776 * (more pushes)
2777 * 41 52 push %r10
2778 * ...
2779 * 41 5a pop %r10
2780 * (more pops)
2781 * 5d pop %rbp
2782 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2783 * c3 retq
2784 *
2785 * There are some variations in the epilogues, like:
2786 *
2787 * 5b pop %rbx
2788 * 41 5a pop %r10
2789 * 41 5c pop %r12
2790 * 41 5d pop %r13
2791 * 41 5e pop %r14
2792 * c9 leaveq
2793 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2794 * c3 retq
2795 *
2796 * and:
2797 *
2798 * 4c 8b 55 e8 mov -0x18(%rbp),%r10
2799 * 48 8b 5d e0 mov -0x20(%rbp),%rbx
2800 * 4c 8b 65 f0 mov -0x10(%rbp),%r12
2801 * 4c 8b 6d f8 mov -0x8(%rbp),%r13
2802 * c9 leaveq
2803 * 49 8d 62 f8 lea -0x8(%r10),%rsp
2804 * c3 retq
2805 *
2806 * Sometimes r13 is used as the DRAP register, in which case it's saved and
2807 * restored beforehand:
2808 *
2809 * 41 55 push %r13
2810 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13
2811 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp
2812 * ...
2813 * 49 8d 65 f0 lea -0x10(%r13),%rsp
2814 * 41 5d pop %r13
2815 * c3 retq
2816 */
2817 static int update_cfi_state(struct instruction *insn,
2818 struct instruction *next_insn,
2819 struct cfi_state *cfi, struct stack_op *op)
2820 {
2821 struct cfi_reg *cfa = &cfi->cfa;
2822 struct cfi_reg *regs = cfi->regs;
2823
2824 /* stack operations don't make sense with an undefined CFA */
2825 if (cfa->base == CFI_UNDEFINED) {
2826 if (insn_func(insn)) {
2827 WARN_INSN(insn, "undefined stack state");
2828 return -1;
2829 }
2830 return 0;
2831 }
2832
2833 if (cfi->type == UNWIND_HINT_TYPE_REGS ||
2834 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
2835 return update_cfi_state_regs(insn, cfi, op);
2836
2837 switch (op->dest.type) {
2838
2839 case OP_DEST_REG:
2840 switch (op->src.type) {
2841
2842 case OP_SRC_REG:
2843 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
2844 cfa->base == CFI_SP &&
2845 check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
2846
2847 /* mov %rsp, %rbp */
2848 cfa->base = op->dest.reg;
2849 cfi->bp_scratch = false;
2850 }
2851
2852 else if (op->src.reg == CFI_SP &&
2853 op->dest.reg == CFI_BP && cfi->drap) {
2854
2855 /* drap: mov %rsp, %rbp */
2856 regs[CFI_BP].base = CFI_BP;
2857 regs[CFI_BP].offset = -cfi->stack_size;
2858 cfi->bp_scratch = false;
2859 }
2860
2861 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2862
2863 /*
2864 * mov %rsp, %reg
2865 *
2866 * This is needed for the rare case where GCC
2867 * does:
2868 *
2869 * mov %rsp, %rax
2870 * ...
2871 * mov %rax, %rsp
2872 */
2873 cfi->vals[op->dest.reg].base = CFI_CFA;
2874 cfi->vals[op->dest.reg].offset = -cfi->stack_size;
2875 }
2876
2877 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
2878 (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
2879
2880 /*
2881 * mov %rbp, %rsp
2882 *
2883 * Restore the original stack pointer (Clang).
2884 */
2885 cfi->stack_size = -cfi->regs[CFI_BP].offset;
2886 }
2887
2888 else if (op->dest.reg == cfa->base) {
2889
2890 /* mov %reg, %rsp */
2891 if (cfa->base == CFI_SP &&
2892 cfi->vals[op->src.reg].base == CFI_CFA) {
2893
2894 /*
2895 * This is needed for the rare case
2896 * where GCC does something dumb like:
2897 *
2898 * lea 0x8(%rsp), %rcx
2899 * ...
2900 * mov %rcx, %rsp
2901 */
2902 cfa->offset = -cfi->vals[op->src.reg].offset;
2903 cfi->stack_size = cfa->offset;
2904
2905 } else if (cfa->base == CFI_SP &&
2906 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2907 cfi->vals[op->src.reg].offset == cfa->offset) {
2908
2909 /*
2910 * Stack swizzle:
2911 *
2912 * 1: mov %rsp, (%[tos])
2913 * 2: mov %[tos], %rsp
2914 * ...
2915 * 3: pop %rsp
2916 *
2917 * Where:
2918 *
2919 * 1 - places a pointer to the previous
2920 * stack at the Top-of-Stack of the
2921 * new stack.
2922 *
2923 * 2 - switches to the new stack.
2924 *
2925 * 3 - pops the Top-of-Stack to restore
2926 * the original stack.
2927 *
2928 * Note: we set base to SP_INDIRECT
2929 * here and preserve offset. Therefore
2930 * when the unwinder reaches ToS it
2931 * will dereference SP and then add the
2932 * offset to find the next frame, IOW:
2933 * (%rsp) + offset.
2934 */
2935 cfa->base = CFI_SP_INDIRECT;
2936
2937 } else {
2938 cfa->base = CFI_UNDEFINED;
2939 cfa->offset = 0;
2940 }
2941 }
2942
2943 else if (op->dest.reg == CFI_SP &&
2944 cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
2945 cfi->vals[op->src.reg].offset == cfa->offset) {
2946
2947 /*
2948 * The same stack swizzle case 2) as above. But
2949 * because we can't change cfa->base, case 3)
2950 * will become a regular POP. Pretend we're a
2951 * PUSH so things don't go unbalanced.
2952 */
2953 cfi->stack_size += 8;
2954 }
2955
2956
2957 break;
2958
2959 case OP_SRC_ADD:
2960 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
2961
2962 /* add imm, %rsp */
2963 cfi->stack_size -= op->src.offset;
2964 if (cfa->base == CFI_SP)
2965 cfa->offset -= op->src.offset;
2966 break;
2967 }
2968
2969 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
2970
2971 /* lea disp(%rbp), %rsp */
2972 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
2973 break;
2974 }
2975
2976 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
2977
2978 /* drap: lea disp(%rsp), %drap */
2979 cfi->drap_reg = op->dest.reg;
2980
2981 /*
2982 * lea disp(%rsp), %reg
2983 *
2984 * This is needed for the rare case where GCC
2985 * does something dumb like:
2986 *
2987 * lea 0x8(%rsp), %rcx
2988 * ...
2989 * mov %rcx, %rsp
2990 */
2991 cfi->vals[op->dest.reg].base = CFI_CFA;
2992 cfi->vals[op->dest.reg].offset =
2993 -cfi->stack_size + op->src.offset;
2994
2995 break;
2996 }
2997
2998 if (cfi->drap && op->dest.reg == CFI_SP &&
2999 op->src.reg == cfi->drap_reg) {
3000
3001 /* drap: lea disp(%drap), %rsp */
3002 cfa->base = CFI_SP;
3003 cfa->offset = cfi->stack_size = -op->src.offset;
3004 cfi->drap_reg = CFI_UNDEFINED;
3005 cfi->drap = false;
3006 break;
3007 }
3008
3009 if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
3010 WARN_INSN(insn, "unsupported stack register modification");
3011 return -1;
3012 }
3013
3014 break;
3015
3016 case OP_SRC_AND:
3017 if (op->dest.reg != CFI_SP ||
3018 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
3019 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
3020 WARN_INSN(insn, "unsupported stack pointer realignment");
3021 return -1;
3022 }
3023
3024 if (cfi->drap_reg != CFI_UNDEFINED) {
3025 /* drap: and imm, %rsp */
3026 cfa->base = cfi->drap_reg;
3027 cfa->offset = cfi->stack_size = 0;
3028 cfi->drap = true;
3029 }
3030
3031 /*
3032 * Older versions of GCC (4.8ish) realign the stack
3033 * without DRAP, using a frame pointer instead.
3034 */
3035
3036 break;
3037
3038 case OP_SRC_POP:
3039 case OP_SRC_POPF:
3040 if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
3041
3042 /* pop %rsp; # restore from a stack swizzle */
3043 cfa->base = CFI_SP;
3044 break;
3045 }
3046
3047 if (!cfi->drap && op->dest.reg == cfa->base) {
3048
3049 /* pop %rbp */
3050 cfa->base = CFI_SP;
3051 }
3052
3053 if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
3054 op->dest.reg == cfi->drap_reg &&
3055 cfi->drap_offset == -cfi->stack_size) {
3056
3057 /* drap: pop %drap */
3058 cfa->base = cfi->drap_reg;
3059 cfa->offset = 0;
3060 cfi->drap_offset = -1;
3061
3062 } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
3063
3064 /* pop %reg */
3065 restore_reg(cfi, op->dest.reg);
3066 }
3067
3068 cfi->stack_size -= 8;
3069 if (cfa->base == CFI_SP)
3070 cfa->offset -= 8;
3071
3072 break;
3073
3074 case OP_SRC_REG_INDIRECT:
3075 if (!cfi->drap && op->dest.reg == cfa->base &&
3076 op->dest.reg == CFI_BP) {
3077
3078 /* mov disp(%rsp), %rbp */
3079 cfa->base = CFI_SP;
3080 cfa->offset = cfi->stack_size;
3081 }
3082
3083 if (cfi->drap && op->src.reg == CFI_BP &&
3084 op->src.offset == cfi->drap_offset) {
3085
3086 /* drap: mov disp(%rbp), %drap */
3087 cfa->base = cfi->drap_reg;
3088 cfa->offset = 0;
3089 cfi->drap_offset = -1;
3090 }
3091
3092 if (cfi->drap && op->src.reg == CFI_BP &&
3093 op->src.offset == regs[op->dest.reg].offset) {
3094
3095 /* drap: mov disp(%rbp), %reg */
3096 restore_reg(cfi, op->dest.reg);
3097
3098 } else if (op->src.reg == cfa->base &&
3099 op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
3100
3101 /* mov disp(%rbp), %reg */
3102 /* mov disp(%rsp), %reg */
3103 restore_reg(cfi, op->dest.reg);
3104
3105 } else if (op->src.reg == CFI_SP &&
3106 op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
3107
3108 /* mov disp(%rsp), %reg */
3109 restore_reg(cfi, op->dest.reg);
3110 }
3111
3112 break;
3113
3114 default:
3115 WARN_INSN(insn, "unknown stack-related instruction");
3116 return -1;
3117 }
3118
3119 break;
3120
3121 case OP_DEST_PUSH:
3122 case OP_DEST_PUSHF:
3123 cfi->stack_size += 8;
3124 if (cfa->base == CFI_SP)
3125 cfa->offset += 8;
3126
3127 if (op->src.type != OP_SRC_REG)
3128 break;
3129
3130 if (cfi->drap) {
3131 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3132
3133 /* drap: push %drap */
3134 cfa->base = CFI_BP_INDIRECT;
3135 cfa->offset = -cfi->stack_size;
3136
3137 /* save drap so we know when to restore it */
3138 cfi->drap_offset = -cfi->stack_size;
3139
3140 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
3141
3142 /* drap: push %rbp */
3143 cfi->stack_size = 0;
3144
3145 } else {
3146
3147 /* drap: push %reg */
3148 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
3149 }
3150
3151 } else {
3152
3153 /* push %reg */
3154 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
3155 }
3156
3157 /* detect when asm code uses rbp as a scratch register */
3158 if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
3159 cfa->base != CFI_BP)
3160 cfi->bp_scratch = true;
3161 break;
3162
3163 case OP_DEST_REG_INDIRECT:
3164
3165 if (cfi->drap) {
3166 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
3167
3168 /* drap: mov %drap, disp(%rbp) */
3169 cfa->base = CFI_BP_INDIRECT;
3170 cfa->offset = op->dest.offset;
3171
3172 /* save drap offset so we know when to restore it */
3173 cfi->drap_offset = op->dest.offset;
3174 } else {
3175
3176 /* drap: mov reg, disp(%rbp) */
3177 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
3178 }
3179
3180 } else if (op->dest.reg == cfa->base) {
3181
3182 /* mov reg, disp(%rbp) */
3183 /* mov reg, disp(%rsp) */
3184 save_reg(cfi, op->src.reg, CFI_CFA,
3185 op->dest.offset - cfi->cfa.offset);
3186
3187 } else if (op->dest.reg == CFI_SP) {
3188
3189 /* mov reg, disp(%rsp) */
3190 save_reg(cfi, op->src.reg, CFI_CFA,
3191 op->dest.offset - cfi->stack_size);
3192
3193 } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
3194
3195 /* mov %rsp, (%reg); # setup a stack swizzle. */
3196 cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
3197 cfi->vals[op->dest.reg].offset = cfa->offset;
3198 }
3199
3200 break;
3201
3202 case OP_DEST_MEM:
3203 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
3204 WARN_INSN(insn, "unknown stack-related memory operation");
3205 return -1;
3206 }
3207
3208 /* pop mem */
3209 cfi->stack_size -= 8;
3210 if (cfa->base == CFI_SP)
3211 cfa->offset -= 8;
3212
3213 break;
3214
3215 default:
3216 WARN_INSN(insn, "unknown stack-related instruction");
3217 return -1;
3218 }
3219
3220 return 0;
3221 }
3222
3223 /*
3224 * The stack layouts of alternatives instructions can sometimes diverge when
3225 * they have stack modifications. That's fine as long as the potential stack
3226 * layouts don't conflict at any given potential instruction boundary.
3227 *
3228 * Flatten the CFIs of the different alternative code streams (both original
3229 * and replacement) into a single shared CFI array which can be used to detect
3230 * conflicts and nicely feed a linear array of ORC entries to the unwinder.
3231 */
3232 static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
3233 {
3234 struct cfi_state **alt_cfi;
3235 int group_off;
3236
3237 if (!insn->alt_group)
3238 return 0;
3239
3240 if (!insn->cfi) {
3241 WARN("CFI missing");
3242 return -1;
3243 }
3244
3245 alt_cfi = insn->alt_group->cfi;
3246 group_off = insn->offset - insn->alt_group->first_insn->offset;
3247
3248 if (!alt_cfi[group_off]) {
3249 alt_cfi[group_off] = insn->cfi;
3250 } else {
3251 if (cficmp(alt_cfi[group_off], insn->cfi)) {
3252 struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
3253 struct instruction *orig = orig_group->first_insn;
3254 char *where = offstr(insn->sec, insn->offset);
3255 WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
3256 free(where);
3257 return -1;
3258 }
3259 }
3260
3261 return 0;
3262 }
3263
3264 static int handle_insn_ops(struct instruction *insn,
3265 struct instruction *next_insn,
3266 struct insn_state *state)
3267 {
3268 struct stack_op *op;
3269
3270 for (op = insn->stack_ops; op; op = op->next) {
3271
3272 if (update_cfi_state(insn, next_insn, &state->cfi, op))
3273 return 1;
3274
3275 if (!insn->alt_group)
3276 continue;
3277
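		/*
		 * PUSHF/POPF save and restore EFLAGS.AC, so mirror that with
		 * a bit-stack: each PUSHF shifts @uaccess_stack left and
		 * records the current uaccess state in bit 0; the initial 1
		 * written below is a bottom-of-stack sentinel.
		 */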
3278 if (op->dest.type == OP_DEST_PUSHF) {
3279 if (!state->uaccess_stack) {
3280 state->uaccess_stack = 1;
3281 } else if (state->uaccess_stack >> 31) {
3282 WARN_INSN(insn, "PUSHF stack exhausted");
3283 return 1;
3284 }
3285 state->uaccess_stack <<= 1;
3286 state->uaccess_stack |= state->uaccess;
3287 }
3288
3289 if (op->src.type == OP_SRC_POPF) {
3290 if (state->uaccess_stack) {
3291 state->uaccess = state->uaccess_stack & 1;
3292 state->uaccess_stack >>= 1;
3293 if (state->uaccess_stack == 1)
3294 state->uaccess_stack = 0;
3295 }
3296 }
3297 }
3298
3299 return 0;
3300 }
3301
3302 static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
3303 {
3304 struct cfi_state *cfi1 = insn->cfi;
3305 int i;
3306
3307 if (!cfi1) {
3308 WARN("CFI missing");
3309 return false;
3310 }
3311
3312 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
3313
3314 WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
3315 cfi1->cfa.base, cfi1->cfa.offset,
3316 cfi2->cfa.base, cfi2->cfa.offset);
3317
3318 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
3319 for (i = 0; i < CFI_NUM_REGS; i++) {
3320 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
3321 sizeof(struct cfi_reg)))
3322 continue;
3323
3324 WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
3325 i, cfi1->regs[i].base, cfi1->regs[i].offset,
3326 i, cfi2->regs[i].base, cfi2->regs[i].offset);
3327 break;
3328 }
3329
3330 } else if (cfi1->type != cfi2->type) {
3331
3332 WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
3333 cfi1->type, cfi2->type);
3334
3335 } else if (cfi1->drap != cfi2->drap ||
3336 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
3337 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
3338
3339 WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
3340 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
3341 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
3342
3343 } else
3344 return true;
3345
3346 return false;
3347 }
3348
3349 static inline bool func_uaccess_safe(struct symbol *func)
3350 {
3351 if (func)
3352 return func->uaccess_safe;
3353
3354 return false;
3355 }
3356
3357 static inline const char *call_dest_name(struct instruction *insn)
3358 {
3359 static char pvname[19];
3360 struct reloc *rel;
3361 int idx;
3362
3363 if (insn_call_dest(insn))
3364 return insn_call_dest(insn)->name;
3365
3366 rel = insn_reloc(NULL, insn);
3367 if (rel && !strcmp(rel->sym->name, "pv_ops")) {
3368 idx = (rel->addend / sizeof(void *));
3369 snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
3370 return pvname;
3371 }
3372
3373 return "{dynamic}";
3374 }
3375
3376 static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
3377 {
3378 struct symbol *target;
3379 struct reloc *rel;
3380 int idx;
3381
3382 rel = insn_reloc(file, insn);
3383 if (!rel || strcmp(rel->sym->name, "pv_ops"))
3384 return false;
3385
3386 idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
3387
3388 if (file->pv_ops[idx].clean)
3389 return true;
3390
3391 file->pv_ops[idx].clean = true;
3392
3393 list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
3394 if (!target->sec->noinstr) {
3395 WARN("pv_ops[%d]: %s", idx, target->name);
3396 file->pv_ops[idx].clean = false;
3397 }
3398 }
3399
3400 return file->pv_ops[idx].clean;
3401 }
3402
3403 static inline bool noinstr_call_dest(struct objtool_file *file,
3404 struct instruction *insn,
3405 struct symbol *func)
3406 {
3407 /*
3408 * We can't deal with indirect function calls at present;
3409 * assume they're instrumented.
3410 */
3411 if (!func) {
3412 if (file->pv_ops)
3413 return pv_call_dest(file, insn);
3414
3415 return false;
3416 }
3417
3418 /*
3419 * If the symbol is from a noinstr section, we're good.
3420 */
3421 if (func->sec->noinstr)
3422 return true;
3423
3424 /*
3425 * If the symbol is a static_call trampoline, we can't tell.
3426 */
3427 if (func->static_call_tramp)
3428 return true;
3429
3430 /*
3431 * The __ubsan_handle_*() calls are like WARN(): they only happen when
3432 * something 'BAD' has happened. At the risk of taking the machine down,
3433 * let them proceed to get the message out.
3434 */
3435 if (!strncmp(func->name, "__ubsan_handle_", 15))
3436 return true;
3437
3438 return false;
3439 }
3440
3441 static int validate_call(struct objtool_file *file,
3442 struct instruction *insn,
3443 struct insn_state *state)
3444 {
3445 if (state->noinstr && state->instr <= 0 &&
3446 !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
3447 WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
3448 return 1;
3449 }
3450
3451 if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
3452 WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
3453 return 1;
3454 }
3455
3456 if (state->df) {
3457 WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
3458 return 1;
3459 }
3460
3461 return 0;
3462 }
3463
3464 static int validate_sibling_call(struct objtool_file *file,
3465 struct instruction *insn,
3466 struct insn_state *state)
3467 {
3468 if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
3469 WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
3470 return 1;
3471 }
3472
3473 return validate_call(file, insn, state);
3474 }
3475
3476 static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
3477 {
3478 if (state->noinstr && state->instr > 0) {
3479 WARN_INSN(insn, "return with instrumentation enabled");
3480 return 1;
3481 }
3482
3483 if (state->uaccess && !func_uaccess_safe(func)) {
3484 WARN_INSN(insn, "return with UACCESS enabled");
3485 return 1;
3486 }
3487
3488 if (!state->uaccess && func_uaccess_safe(func)) {
3489 WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
3490 return 1;
3491 }
3492
3493 if (state->df) {
3494 WARN_INSN(insn, "return with DF set");
3495 return 1;
3496 }
3497
3498 if (func && has_modified_stack_frame(insn, state)) {
3499 WARN_INSN(insn, "return with modified stack frame");
3500 return 1;
3501 }
3502
3503 if (state->cfi.bp_scratch) {
3504 WARN_INSN(insn, "BP used as a scratch register");
3505 return 1;
3506 }
3507
3508 return 0;
3509 }
3510
3511 static struct instruction *next_insn_to_validate(struct objtool_file *file,
3512 struct instruction *insn)
3513 {
3514 struct alt_group *alt_group = insn->alt_group;
3515
3516 /*
3517 * Simulate the fact that alternatives are patched in-place. When the
3518 * end of a replacement alt_group is reached, redirect objtool flow to
3519 * the end of the original alt_group.
3520 *
3521 * insn->alts->insn -> alt_group->first_insn
3522 * ...
3523 * alt_group->last_insn
3524 * [alt_group->nop] -> next(orig_group->last_insn)
3525 */
3526 if (alt_group) {
3527 if (alt_group->nop) {
3528 /* ->nop implies ->orig_group */
3529 if (insn == alt_group->last_insn)
3530 return alt_group->nop;
3531 if (insn == alt_group->nop)
3532 goto next_orig;
3533 }
3534 if (insn == alt_group->last_insn && alt_group->orig_group)
3535 goto next_orig;
3536 }
3537
3538 return next_insn_same_sec(file, insn);
3539
3540 next_orig:
3541 return next_insn_same_sec(file, alt_group->orig_group->last_insn);
3542 }
3543
3544 /*
3545 * Follow the branch starting at the given instruction, and recursively follow
3546 * any other branches (jumps). Meanwhile, track the frame pointer state at
3547 * each instruction and validate all the rules described in
3548 * tools/objtool/Documentation/objtool.txt.
3549 */
3550 static int validate_branch(struct objtool_file *file, struct symbol *func,
3551 struct instruction *insn, struct insn_state state)
3552 {
3553 struct alternative *alt;
3554 struct instruction *next_insn, *prev_insn = NULL;
3555 struct section *sec;
3556 u8 visited;
3557 int ret;
3558
3559 sec = insn->sec;
3560
3561 while (1) {
3562 next_insn = next_insn_to_validate(file, insn);
3563
3564 if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
3565 /* Ignore KCFI type preambles, which always fall through */
3566 if (!strncmp(func->name, "__cfi_", 6) ||
3567 !strncmp(func->name, "__pfx_", 6))
3568 return 0;
3569
3570 WARN("%s() falls through to next function %s()",
3571 func->name, insn_func(insn)->name);
3572 return 1;
3573 }
3574
3575 if (func && insn->ignore) {
3576 WARN_INSN(insn, "BUG: why am I validating an ignored function?");
3577 return 1;
3578 }
3579
3580 visited = VISITED_BRANCH << state.uaccess;
3581 if (insn->visited & VISITED_BRANCH_MASK) {
3582 if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
3583 return 1;
3584
3585 if (insn->visited & visited)
3586 return 0;
3587 } else {
3588 nr_insns_visited++;
3589 }
3590
3591 if (state.noinstr)
3592 state.instr += insn->instr;
3593
3594 if (insn->hint) {
3595 if (insn->restore) {
3596 struct instruction *save_insn, *i;
3597
3598 i = insn;
3599 save_insn = NULL;
3600
3601 sym_for_each_insn_continue_reverse(file, func, i) {
3602 if (i->save) {
3603 save_insn = i;
3604 break;
3605 }
3606 }
3607
3608 if (!save_insn) {
3609 WARN_INSN(insn, "no corresponding CFI save for CFI restore");
3610 return 1;
3611 }
3612
3613 if (!save_insn->visited) {
3614 WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
3615 return 1;
3616 }
3617
3618 insn->cfi = save_insn->cfi;
3619 nr_cfi_reused++;
3620 }
3621
3622 state.cfi = *insn->cfi;
3623 } else {
3624 /* XXX track if we actually changed state.cfi */
3625
3626 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
3627 insn->cfi = prev_insn->cfi;
3628 nr_cfi_reused++;
3629 } else {
3630 insn->cfi = cfi_hash_find_or_add(&state.cfi);
3631 }
3632 }
3633
3634 insn->visited |= visited;
3635
3636 if (propagate_alt_cfi(file, insn))
3637 return 1;
3638
3639 if (!insn->ignore_alts && insn->alts) {
3640 bool skip_orig = false;
3641
3642 for (alt = insn->alts; alt; alt = alt->next) {
3643 if (alt->skip_orig)
3644 skip_orig = true;
3645
3646 ret = validate_branch(file, func, alt->insn, state);
3647 if (ret) {
3648 if (opts.backtrace)
3649 BT_FUNC("(alt)", insn);
3650 return ret;
3651 }
3652 }
3653
3654 if (skip_orig)
3655 return 0;
3656 }
3657
3658 if (handle_insn_ops(insn, next_insn, &state))
3659 return 1;
3660
3661 switch (insn->type) {
3662
3663 case INSN_RETURN:
3664 return validate_return(func, insn, &state);
3665
3666 case INSN_CALL:
3667 case INSN_CALL_DYNAMIC:
3668 ret = validate_call(file, insn, &state);
3669 if (ret)
3670 return ret;
3671
3672 if (opts.stackval && func && !is_fentry_call(insn) &&
3673 !has_valid_stack_frame(&state)) {
3674 WARN_INSN(insn, "call without frame pointer save/setup");
3675 return 1;
3676 }
3677
3678 if (insn->dead_end)
3679 return 0;
3680
3681 break;
3682
3683 case INSN_JUMP_CONDITIONAL:
3684 case INSN_JUMP_UNCONDITIONAL:
3685 if (is_sibling_call(insn)) {
3686 ret = validate_sibling_call(file, insn, &state);
3687 if (ret)
3688 return ret;
3689
3690 } else if (insn->jump_dest) {
3691 ret = validate_branch(file, func,
3692 insn->jump_dest, state);
3693 if (ret) {
3694 if (opts.backtrace)
3695 BT_FUNC("(branch)", insn);
3696 return ret;
3697 }
3698 }
3699
3700 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3701 return 0;
3702
3703 break;
3704
3705 case INSN_JUMP_DYNAMIC:
3706 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3707 if (is_sibling_call(insn)) {
3708 ret = validate_sibling_call(file, insn, &state);
3709 if (ret)
3710 return ret;
3711 }
3712
3713 if (insn->type == INSN_JUMP_DYNAMIC)
3714 return 0;
3715
3716 break;
3717
3718 case INSN_CONTEXT_SWITCH:
3719 if (func && (!next_insn || !next_insn->hint)) {
3720 WARN_INSN(insn, "unsupported instruction in callable function");
3721 return 1;
3722 }
3723 return 0;
3724
3725 case INSN_STAC:
3726 if (state.uaccess) {
3727 WARN_INSN(insn, "recursive UACCESS enable");
3728 return 1;
3729 }
3730
3731 state.uaccess = true;
3732 break;
3733
3734 case INSN_CLAC:
3735 if (!state.uaccess && func) {
3736 WARN_INSN(insn, "redundant UACCESS disable");
3737 return 1;
3738 }
3739
3740 if (func_uaccess_safe(func) && !state.uaccess_stack) {
3741 WARN_INSN(insn, "UACCESS-safe disables UACCESS");
3742 return 1;
3743 }
3744
3745 state.uaccess = false;
3746 break;
3747
3748 case INSN_STD:
3749 if (state.df) {
3750 WARN_INSN(insn, "recursive STD");
3751 return 1;
3752 }
3753
3754 state.df = true;
3755 break;
3756
3757 case INSN_CLD:
3758 if (!state.df && func) {
3759 WARN_INSN(insn, "redundant CLD");
3760 return 1;
3761 }
3762
3763 state.df = false;
3764 break;
3765
3766 default:
3767 break;
3768 }
3769
3770 if (insn->dead_end)
3771 return 0;
3772
3773 if (!next_insn) {
3774 if (state.cfi.cfa.base == CFI_UNDEFINED)
3775 return 0;
3776 WARN("%s: unexpected end of section", sec->name);
3777 return 1;
3778 }
3779
3780 prev_insn = insn;
3781 insn = next_insn;
3782 }
3783
3784 return 0;
3785 }
3786
3787 static int validate_unwind_hint(struct objtool_file *file,
3788 struct instruction *insn,
3789 struct insn_state *state)
3790 {
3791 if (insn->hint && !insn->visited && !insn->ignore) {
3792 int ret = validate_branch(file, insn_func(insn), insn, *state);
3793 if (ret && opts.backtrace)
3794 BT_FUNC("<=== (hint)", insn);
3795 return ret;
3796 }
3797
3798 return 0;
3799 }
3800
3801 static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
3802 {
3803 struct instruction *insn;
3804 struct insn_state state;
3805 int warnings = 0;
3806
3807 if (!file->hints)
3808 return 0;
3809
3810 init_insn_state(file, &state, sec);
3811
3812 if (sec) {
3813 sec_for_each_insn(file, sec, insn)
3814 warnings += validate_unwind_hint(file, insn, &state);
3815 } else {
3816 for_each_insn(file, insn)
3817 warnings += validate_unwind_hint(file, insn, &state);
3818 }
3819
3820 return warnings;
3821 }
3822
3823 /*
3824 * Validate rethunk entry constraint: must untrain RET before the first RET.
3825 *
3826 * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
3827 * before an actual RET instruction.
3828 */
3829 static int validate_unret(struct objtool_file *file, struct instruction *insn)
3830 {
3831 struct instruction *next, *dest;
3832 int ret, warnings = 0;
3833
3834 for (;;) {
3835 next = next_insn_to_validate(file, insn);
3836
3837 if (insn->visited & VISITED_UNRET)
3838 return 0;
3839
3840 insn->visited |= VISITED_UNRET;
3841
3842 if (!insn->ignore_alts && insn->alts) {
3843 struct alternative *alt;
3844 bool skip_orig = false;
3845
3846 for (alt = insn->alts; alt; alt = alt->next) {
3847 if (alt->skip_orig)
3848 skip_orig = true;
3849
3850 ret = validate_unret(file, alt->insn);
3851 if (ret) {
3852 if (opts.backtrace)
3853 BT_FUNC("(alt)", insn);
3854 return ret;
3855 }
3856 }
3857
3858 if (skip_orig)
3859 return 0;
3860 }
3861
3862 switch (insn->type) {
3863
3864 case INSN_CALL_DYNAMIC:
3865 case INSN_JUMP_DYNAMIC:
3866 case INSN_JUMP_DYNAMIC_CONDITIONAL:
3867 WARN_INSN(insn, "early indirect call");
3868 return 1;
3869
3870 case INSN_JUMP_UNCONDITIONAL:
3871 case INSN_JUMP_CONDITIONAL:
3872 if (!is_sibling_call(insn)) {
3873 if (!insn->jump_dest) {
3874 WARN_INSN(insn, "unresolved jump target after linking?!?");
3875 return -1;
3876 }
3877 ret = validate_unret(file, insn->jump_dest);
3878 if (ret) {
3879 if (opts.backtrace) {
3880 BT_FUNC("(branch%s)", insn,
3881 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
3882 }
3883 return ret;
3884 }
3885
3886 if (insn->type == INSN_JUMP_UNCONDITIONAL)
3887 return 0;
3888
3889 break;
3890 }
3891
3892 /* fallthrough */
3893 case INSN_CALL:
3894 dest = find_insn(file, insn_call_dest(insn)->sec,
3895 insn_call_dest(insn)->offset);
3896 if (!dest) {
3897 WARN("Unresolved function after linking!?: %s",
3898 insn_call_dest(insn)->name);
3899 return -1;
3900 }
3901
3902 ret = validate_unret(file, dest);
3903 if (ret) {
3904 if (opts.backtrace)
3905 BT_FUNC("(call)", insn);
3906 return ret;
3907 }
3908 /*
3909 * If a call returns without error, it must have seen UNTRAIN_RET.
3910 * Therefore any non-error return is a success.
3911 */
3912 return 0;
3913
3914 case INSN_RETURN:
3915 WARN_INSN(insn, "RET before UNTRAIN");
3916 return 1;
3917
3918 case INSN_NOP:
3919 if (insn->retpoline_safe)
3920 return 0;
3921 break;
3922
3923 default:
3924 break;
3925 }
3926
3927 if (!next) {
3928 WARN_INSN(insn, "teh end!");
3929 return -1;
3930 }
3931 insn = next;
3932 }
3933
3934 return warnings;
3935 }
3936
3937 /*
3938 * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
3939 * VALIDATE_UNRET_END before RET.
3940 */
3941 static int validate_unrets(struct objtool_file *file)
3942 {
3943 struct instruction *insn;
3944 int ret, warnings = 0;
3945
3946 for_each_insn(file, insn) {
3947 if (!insn->unret)
3948 continue;
3949
3950 ret = validate_unret(file, insn);
3951 if (ret < 0) {
3952 WARN_INSN(insn, "Failed UNRET validation");
3953 return ret;
3954 }
3955 warnings += ret;
3956 }
3957
3958 return warnings;
3959 }
3960
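/*
 * In a retpoline/rethunk build every indirect jump, indirect call and RET
 * should have been rewritten into a thunk (or explicitly annotated
 * retpoline-safe); warn about any that survived.
 */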
3961 static int validate_retpoline(struct objtool_file *file)
3962 {
3963 struct instruction *insn;
3964 int warnings = 0;
3965
3966 for_each_insn(file, insn) {
3967 if (insn->type != INSN_JUMP_DYNAMIC &&
3968 insn->type != INSN_CALL_DYNAMIC &&
3969 insn->type != INSN_RETURN)
3970 continue;
3971
3972 if (insn->retpoline_safe)
3973 continue;
3974
3975 if (insn->sec->init)
3976 continue;
3977
3978 if (insn->type == INSN_RETURN) {
3979 if (opts.rethunk) {
3980 WARN_INSN(insn, "'naked' return found in RETHUNK build");
3981 } else
3982 continue;
3983 } else {
3984 WARN_INSN(insn, "indirect %s found in RETPOLINE build",
3985 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
3986 }
3987
3988 warnings++;
3989 }
3990
3991 return warnings;
3992 }
3993
3994 static bool is_kasan_insn(struct instruction *insn)
3995 {
3996 return (insn->type == INSN_CALL &&
3997 !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
3998 }
3999
4000 static bool is_ubsan_insn(struct instruction *insn)
4001 {
4002 return (insn->type == INSN_CALL &&
4003 !strcmp(insn_call_dest(insn)->name,
4004 "__ubsan_handle_builtin_unreachable"));
4005 }
4006
4007 static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
4008 {
4009 int i;
4010 struct instruction *prev_insn;
4011
4012 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
4013 return true;
4014
4015 /*
4016 * Ignore alternative replacement instructions. This can happen
4017 * when a whitelisted function uses one of the ALTERNATIVE macros.
4018 */
4019 if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
4020 !strcmp(insn->sec->name, ".altinstr_aux"))
4021 return true;
4022
4023 /*
4024 * Whole archive runs might encounter dead code from weak symbols.
4025 * This is where the linker will have dropped the weak symbol in
4026 * favour of a regular symbol, but left the code in place.
4027 *
4028 * In this case we'll find a piece of code (a whole function) that is
4029 * not covered by any function symbol. Ignore it.
4030 */
4031 if (opts.link && !insn_func(insn)) {
4032 int size = find_symbol_hole_containing(insn->sec, insn->offset);
4033 unsigned long end = insn->offset + size;
4034
4035 if (!size) /* not a hole */
4036 return false;
4037
4038 if (size < 0) /* hole until the end */
4039 return true;
4040
4041 sec_for_each_insn_continue(file, insn) {
4042 /*
4043 * If we reach a visited instruction at or before the
4044 * end of the hole, ignore the unreachable.
4045 */
4046 if (insn->visited)
4047 return true;
4048
4049 if (insn->offset >= end)
4050 break;
4051
4052 /*
4053 * If this hole jumps to a .cold function, mark it ignore too.
4054 */
4055 if (insn->jump_dest && insn_func(insn->jump_dest) &&
4056 strstr(insn_func(insn->jump_dest)->name, ".cold")) {
4057 struct instruction *dest = insn->jump_dest;
4058 func_for_each_insn(file, insn_func(dest), dest)
4059 dest->ignore = true;
4060 }
4061 }
4062
4063 return false;
4064 }
4065
4066 if (!insn_func(insn))
4067 return false;
4068
4069 if (insn_func(insn)->static_call_tramp)
4070 return true;
4071
4072 /*
4073 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
4074 * __builtin_unreachable(). The BUG() macro has an unreachable() after
4075 * the UD2, which causes GCC's undefined trap logic to emit another UD2
4076 * (or occasionally a JMP to UD2).
4077 *
4078 * It may also insert a UD2 after calling a __noreturn function.
4079 */
4080 prev_insn = prev_insn_same_sec(file, insn);
4081 if (prev_insn && prev_insn->dead_end &&
4082 (insn->type == INSN_BUG ||
4083 (insn->type == INSN_JUMP_UNCONDITIONAL &&
4084 insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
4085 return true;
4086
4087 /*
4088 * Check if this (or a subsequent) instruction is related to
4089 * CONFIG_UBSAN or CONFIG_KASAN.
4090 *
4091 * End the search at 5 instructions to avoid going into the weeds.
4092 */
4093 for (i = 0; i < 5; i++) {
4094
4095 if (is_kasan_insn(insn) || is_ubsan_insn(insn))
4096 return true;
4097
4098 if (insn->type == INSN_JUMP_UNCONDITIONAL) {
4099 if (insn->jump_dest &&
4100 insn_func(insn->jump_dest) == insn_func(insn)) {
4101 insn = insn->jump_dest;
4102 continue;
4103 }
4104
4105 break;
4106 }
4107
4108 if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
4109 break;
4110
4111 insn = next_insn_same_sec(file, insn);
4112 }
4113
4114 return false;
4115 }
4116
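/*
 * When functions are built with prefix padding (opts.prefix NOP bytes
 * before the entry point), create a __pfx_<func> symbol over the padding
 * and propagate the function's entry CFI to it so ORC data covers the
 * prefix code too.
 */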
4117 static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
4118 {
4119 struct instruction *insn, *prev;
4120 struct cfi_state *cfi;
4121
4122 insn = find_insn(file, func->sec, func->offset);
4123 if (!insn)
4124 return -1;
4125
4126 for (prev = prev_insn_same_sec(file, insn);
4127 prev;
4128 prev = prev_insn_same_sec(file, prev)) {
4129 u64 offset;
4130
4131 if (prev->type != INSN_NOP)
4132 return -1;
4133
4134 offset = func->offset - prev->offset;
4135
4136 if (offset > opts.prefix)
4137 return -1;
4138
4139 if (offset < opts.prefix)
4140 continue;
4141
4142 elf_create_prefix_symbol(file->elf, func, opts.prefix);
4143 break;
4144 }
4145
4146 if (!prev)
4147 return -1;
4148
4149 if (!insn->cfi) {
4150 /*
4151 * This can happen if stack validation isn't enabled or the
4152 * function is annotated with STACK_FRAME_NON_STANDARD.
4153 */
4154 return 0;
4155 }
4156
4157 /* Propagate insn->cfi to the prefix code */
4158 cfi = cfi_hash_find_or_add(insn->cfi);
4159 for (; prev != insn; prev = next_insn_same_sec(file, prev))
4160 prev->cfi = cfi;
4161
4162 return 0;
4163 }
4164
4165 static int add_prefix_symbols(struct objtool_file *file)
4166 {
4167 struct section *sec;
4168 struct symbol *func;
4169 int warnings = 0;
4170
4171 for_each_sec(file, sec) {
4172 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4173 continue;
4174
4175 sec_for_each_sym(sec, func) {
4176 if (func->type != STT_FUNC)
4177 continue;
4178
4179 add_prefix_symbol(file, func);
4180 }
4181 }
4182
4183 return warnings;
4184 }
4185
4186 static int validate_symbol(struct objtool_file *file, struct section *sec,
4187 struct symbol *sym, struct insn_state *state)
4188 {
4189 struct instruction *insn;
4190 int ret;
4191
4192 if (!sym->len) {
4193 WARN("%s() is missing an ELF size annotation", sym->name);
4194 return 1;
4195 }
4196
4197 if (sym->pfunc != sym || sym->alias != sym)
4198 return 0;
4199
4200 insn = find_insn(file, sec, sym->offset);
4201 if (!insn || insn->ignore || insn->visited)
4202 return 0;
4203
4204 state->uaccess = sym->uaccess_safe;
4205
4206 ret = validate_branch(file, insn_func(insn), insn, *state);
4207 if (ret && opts.backtrace)
4208 BT_FUNC("<=== (sym)", insn);
4209 return ret;
4210 }
4211
4212 static int validate_section(struct objtool_file *file, struct section *sec)
4213 {
4214 struct insn_state state;
4215 struct symbol *func;
4216 int warnings = 0;
4217
4218 sec_for_each_sym(sec, func) {
4219 if (func->type != STT_FUNC)
4220 continue;
4221
4222 init_insn_state(file, &state, sec);
4223 set_func_state(&state.cfi);
4224
4225 warnings += validate_symbol(file, sec, func, &state);
4226 }
4227
4228 return warnings;
4229 }
4230
4231 static int validate_noinstr_sections(struct objtool_file *file)
4232 {
4233 struct section *sec;
4234 int warnings = 0;
4235
4236 sec = find_section_by_name(file->elf, ".noinstr.text");
4237 if (sec) {
4238 warnings += validate_section(file, sec);
4239 warnings += validate_unwind_hints(file, sec);
4240 }
4241
4242 sec = find_section_by_name(file->elf, ".entry.text");
4243 if (sec) {
4244 warnings += validate_section(file, sec);
4245 warnings += validate_unwind_hints(file, sec);
4246 }
4247
4248 sec = find_section_by_name(file->elf, ".cpuidle.text");
4249 if (sec) {
4250 warnings += validate_section(file, sec);
4251 warnings += validate_unwind_hints(file, sec);
4252 }
4253
4254 return warnings;
4255 }
4256
4257 static int validate_functions(struct objtool_file *file)
4258 {
4259 struct section *sec;
4260 int warnings = 0;
4261
4262 for_each_sec(file, sec) {
4263 if (!(sec->sh.sh_flags & SHF_EXECINSTR))
4264 continue;
4265
4266 warnings += validate_section(file, sec);
4267 }
4268
4269 return warnings;
4270 }
4271
4272 static void mark_endbr_used(struct instruction *insn)
4273 {
4274 if (!list_empty(&insn->call_node))
4275 list_del_init(&insn->call_node);
4276 }
4277
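/*
 * Check whether @insn->offset is the first byte past a symbol whose first
 * instruction is ENDBR (or annotated noendbr) -- the typical begin/end
 * code-range pattern.
 */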
4278 static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
4279 {
4280 struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
4281 struct instruction *first;
4282
4283 if (!sym)
4284 return false;
4285
4286 first = find_insn(file, sym->sec, sym->offset);
4287 if (!first)
4288 return false;
4289
4290 if (first->type != INSN_ENDBR && !first->noendbr)
4291 return false;
4292
4293 return insn->offset == sym->offset + sym->len;
4294 }
4295
4296 static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
4297 {
4298 struct instruction *dest;
4299 struct reloc *reloc;
4300 unsigned long off;
4301 int warnings = 0;
4302
4303 /*
4304 * Looking for function pointer load relocations. Ignore
4305 * direct/indirect branches:
4306 */
4307 switch (insn->type) {
4308 case INSN_CALL:
4309 case INSN_CALL_DYNAMIC:
4310 case INSN_JUMP_CONDITIONAL:
4311 case INSN_JUMP_UNCONDITIONAL:
4312 case INSN_JUMP_DYNAMIC:
4313 case INSN_JUMP_DYNAMIC_CONDITIONAL:
4314 case INSN_RETURN:
4315 case INSN_NOP:
4316 return 0;
4317 default:
4318 break;
4319 }
4320
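	/*
	 * An instruction can carry more than one relocation (e.g. both a
	 * displacement and an immediate), so walk every reloc landing
	 * within this instruction's bytes.
	 */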
4321 for (reloc = insn_reloc(file, insn);
4322 reloc;
4323 reloc = find_reloc_by_dest_range(file->elf, insn->sec,
4324 reloc->offset + 1,
4325 (insn->offset + insn->len) - (reloc->offset + 1))) {
4326
4327 /*
4328 * static_call_update() references the trampoline, which
4329 * doesn't have (or need) ENDBR. Skip warning in that case.
4330 */
4331 if (reloc->sym->static_call_tramp)
4332 continue;
4333
4334 off = reloc->sym->offset;
4335 if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
4336 off += arch_dest_reloc_offset(reloc->addend);
4337 else
4338 off += reloc->addend;
4339
4340 dest = find_insn(file, reloc->sym->sec, off);
4341 if (!dest)
4342 continue;
4343
4344 if (dest->type == INSN_ENDBR) {
4345 mark_endbr_used(dest);
4346 continue;
4347 }
4348
4349 if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
4350 /*
4351 * Any reference from a function to itself is either
4352 * _THIS_IP_ or IRET-to-self.
4353 *
4354 * There is no sane way to annotate _THIS_IP_ since the
4355 * compiler treats the relocation as a constant and is
4356 * happy to fold in offsets, skewing any annotation we
4357 * do and leading to vast amounts of false positives.
4358 *
4359 * There is also compiler-generated _THIS_IP_, e.g. via
4360 * KCOV, which we have no hope of annotating.
4361 *
4362 * As such, blanket-accept self-references without
4363 * further checks.
4364 */
4365 continue;
4366 }
4367
4368 /*
4369 * Accept anything annotated with ANNOTATE_NOENDBR.
4370 */
4371 if (dest->noendbr)
4372 continue;
4373
4374 /*
4375 * Accept if this is the instruction just past the end of a symbol
4376 * that starts with ENDBR (or is noendbr) -- typical code-range usage.
4377 */
4378 if (noendbr_range(file, dest))
4379 continue;
4380
4381 WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
4382
4383 warnings++;
4384 }
4385
4386 return warnings;
4387 }
4388
4389 static int validate_ibt_data_reloc(struct objtool_file *file,
4390 struct reloc *reloc)
4391 {
4392 struct instruction *dest;
4393
4394 dest = find_insn(file, reloc->sym->sec,
4395 reloc->sym->offset + reloc->addend);
4396 if (!dest)
4397 return 0;
4398
4399 if (dest->type == INSN_ENDBR) {
4400 mark_endbr_used(dest);
4401 return 0;
4402 }
4403
4404 if (dest->noendbr)
4405 return 0;
4406
4407 WARN_FUNC("data relocation to !ENDBR: %s",
4408 reloc->sec->base, reloc->offset,
4409 offstr(dest->sec, dest->offset));
4410
4411 return 1;
4412 }
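
/*
 * A minimal illustration (hypothetical, not from the kernel) of what
 * the above catches: a function pointer stored in a data section must
 * point at ENDBR, because it may be used for an indirect call:
 *
 *	static void (*fp)(void) = some_func;	// data reloc to some_func
 *
 * If some_func does not start with ENDBR and is not ANNOTATE_NOENDBR,
 * an IBT-enabled CPU would raise #CP on an indirect call through
 * 'fp', so objtool warns.
 */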
4413
4414 /*
4415 * Validate IBT rules and remove used ENDBR instructions from the seal list.
4416 * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
4417 * NOPs) later, in create_ibt_endbr_seal_sections().
4418 */
4419 static int validate_ibt(struct objtool_file *file)
4420 {
4421 struct section *sec;
4422 struct reloc *reloc;
4423 struct instruction *insn;
4424 int warnings = 0;
4425
4426 for_each_insn(file, insn)
4427 warnings += validate_ibt_insn(file, insn);
4428
4429 for_each_sec(file, sec) {
4430
4431 /* Already done by validate_ibt_insn() */
4432 if (sec->sh.sh_flags & SHF_EXECINSTR)
4433 continue;
4434
4435 if (!sec->reloc)
4436 continue;
4437
4438 /*
4439 * These sections can reference text addresses, but not with
4440 * the intent of taking an indirect branch to them.
4441 */
4442 if ((!strncmp(sec->name, ".discard", 8) &&
4443 strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
4444 !strncmp(sec->name, ".debug", 6) ||
4445 !strcmp(sec->name, ".altinstructions") ||
4446 !strcmp(sec->name, ".ibt_endbr_seal") ||
4447 !strcmp(sec->name, ".orc_unwind_ip") ||
4448 !strcmp(sec->name, ".parainstructions") ||
4449 !strcmp(sec->name, ".retpoline_sites") ||
4450 !strcmp(sec->name, ".smp_locks") ||
4451 !strcmp(sec->name, ".static_call_sites") ||
4452 !strcmp(sec->name, "_error_injection_whitelist") ||
4453 !strcmp(sec->name, "_kprobe_blacklist") ||
4454 !strcmp(sec->name, "__bug_table") ||
4455 !strcmp(sec->name, "__ex_table") ||
4456 !strcmp(sec->name, "__jump_table") ||
4457 !strcmp(sec->name, "__mcount_loc") ||
4458 !strcmp(sec->name, ".kcfi_traps") ||
4459 strstr(sec->name, "__patchable_function_entries"))
4460 continue;
4461
4462 list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
4463 warnings += validate_ibt_data_reloc(file, reloc);
4464 }
4465
4466 return warnings;
4467 }
4468
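/*
 * Straight-Line-Speculation mitigation: with CONFIG_SLS the compiler
 * (-mharden-sls=all) plants a speculation trap after every RET and
 * indirect JMP, e.g.:
 *
 *	ret
 *	int3	# halts straight-line speculation
 *
 * validate_sls() only verifies that the trap is present.
 */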
4469 static int validate_sls(struct objtool_file *file)
4470 {
4471 struct instruction *insn, *next_insn;
4472 int warnings = 0;
4473
4474 for_each_insn(file, insn) {
4475 next_insn = next_insn_same_sec(file, insn);
4476
4477 if (insn->retpoline_safe)
4478 continue;
4479
4480 switch (insn->type) {
4481 case INSN_RETURN:
4482 if (!next_insn || next_insn->type != INSN_TRAP) {
4483 WARN_INSN(insn, "missing int3 after ret");
4484 warnings++;
4485 }
4486
4487 break;
4488 case INSN_JUMP_DYNAMIC:
4489 if (!next_insn || next_insn->type != INSN_TRAP) {
4490 WARN_INSN(insn, "missing int3 after indirect jump");
4491 warnings++;
4492 }
4493 break;
4494 default:
4495 break;
4496 }
4497 }
4498
4499 return warnings;
4500 }
4501
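/*
 * Any instruction the branch validation never visited, and that
 * cannot be explained away by ignore_unreachable_insn(), is reported
 * as unreachable; the scan stops at the first hit.
 */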
4502 static int validate_reachable_instructions(struct objtool_file *file)
4503 {
4504 struct instruction *insn;
4505
4506 if (file->ignore_unreachables)
4507 return 0;
4508
4509 for_each_insn(file, insn) {
4510 if (insn->visited || ignore_unreachable_insn(file, insn))
4511 continue;
4512
4513 WARN_INSN(insn, "unreachable instruction");
4514 return 1;
4515 }
4516
4517 return 0;
4518 }
4519
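/*
 * Main entry point: decode the object file, run the requested
 * validation passes, then emit the requested ELF sections (static
 * call sites, retpoline sites, ORC data, ...).
 */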
4520 int check(struct objtool_file *file)
4521 {
4522 int ret, warnings = 0;
4523
4524 arch_initial_func_cfi_state(&initial_func_cfi);
4525 init_cfi_state(&init_cfi);
4526 init_cfi_state(&func_cfi);
4527 set_func_state(&func_cfi);
4528
4529 if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
4530 goto out;
4531
4532 cfi_hash_add(&init_cfi);
4533 cfi_hash_add(&func_cfi);
4534
4535 ret = decode_sections(file);
4536 if (ret < 0)
4537 goto out;
4538
4539 warnings += ret;
4540
4541 if (!nr_insns)
4542 goto out;
4543
4544 if (opts.retpoline) {
4545 ret = validate_retpoline(file);
4546 if (ret < 0)
4547 return ret;
4548 warnings += ret;
4549 }
4550
4551 if (opts.stackval || opts.orc || opts.uaccess) {
4552 ret = validate_functions(file);
4553 if (ret < 0)
4554 goto out;
4555 warnings += ret;
4556
4557 ret = validate_unwind_hints(file, NULL);
4558 if (ret < 0)
4559 goto out;
4560 warnings += ret;
4561
4562 if (!warnings) {
4563 ret = validate_reachable_instructions(file);
4564 if (ret < 0)
4565 goto out;
4566 warnings += ret;
4567 }
4568
4569 } else if (opts.noinstr) {
4570 ret = validate_noinstr_sections(file);
4571 if (ret < 0)
4572 goto out;
4573 warnings += ret;
4574 }
4575
4576 if (opts.unret) {
4577 /*
4578 * Must come after validate_branch() and friends, as it
4579 * plays further games with insn->visited.
4580 */
4581 ret = validate_unrets(file);
4582 if (ret < 0)
4583 return ret;
4584 warnings += ret;
4585 }
4586
4587 if (opts.ibt) {
4588 ret = validate_ibt(file);
4589 if (ret < 0)
4590 goto out;
4591 warnings += ret;
4592 }
4593
4594 if (opts.sls) {
4595 ret = validate_sls(file);
4596 if (ret < 0)
4597 goto out;
4598 warnings += ret;
4599 }
4600
4601 if (opts.static_call) {
4602 ret = create_static_call_sections(file);
4603 if (ret < 0)
4604 goto out;
4605 warnings += ret;
4606 }
4607
4608 if (opts.retpoline) {
4609 ret = create_retpoline_sites_sections(file);
4610 if (ret < 0)
4611 goto out;
4612 warnings += ret;
4613 }
4614
4615 if (opts.cfi) {
4616 ret = create_cfi_sections(file);
4617 if (ret < 0)
4618 goto out;
4619 warnings += ret;
4620 }
4621
4622 if (opts.rethunk) {
4623 ret = create_return_sites_sections(file);
4624 if (ret < 0)
4625 goto out;
4626 warnings += ret;
4627
4628 if (opts.hack_skylake) {
4629 ret = create_direct_call_sections(file);
4630 if (ret < 0)
4631 goto out;
4632 warnings += ret;
4633 }
4634 }
4635
4636 if (opts.mcount) {
4637 ret = create_mcount_loc_sections(file);
4638 if (ret < 0)
4639 goto out;
4640 warnings += ret;
4641 }
4642
4643 if (opts.prefix) {
4644 ret = add_prefix_symbols(file);
4645 if (ret < 0)
4646 return ret;
4647 warnings += ret;
4648 }
4649
4650 if (opts.ibt) {
4651 ret = create_ibt_endbr_seal_sections(file);
4652 if (ret < 0)
4653 goto out;
4654 warnings += ret;
4655 }
4656
4657 if (opts.orc && nr_insns) {
4658 ret = orc_create(file);
4659 if (ret < 0)
4660 goto out;
4661 warnings += ret;
4662 }
4663
4665 if (opts.stats) {
4666 printf("nr_insns_visited: %ld\n", nr_insns_visited);
4667 printf("nr_cfi: %ld\n", nr_cfi);
4668 printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
4669 printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
4670 }
4671
4672 out:
4673 /*
4674 * For now, don't fail the kernel build on fatal warnings. These
4675 * errors are still fairly common due to the growing matrix of
4676 * supported toolchains and their recent pace of change.
4677 */
4678 return 0;
4679 }