arch/x86/kernel/unwind_orc.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/orc_types.h>
#include <asm/orc_lookup.h>

#define orc_warn(fmt, ...) \
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

#define orc_warn_current(args...)					\
({									\
	if (state->task == current)					\
		orc_warn(args);						\
})

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

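/*
 * The unwinder is driven via the arch-independent pattern (see
 * arch/x86/kernel/dumpstack.c), roughly:
 *
 *	struct unwind_state state;
 *	unsigned long addr;
 *
 *	for (unwind_start(&state, task, regs, first_frame);
 *	     !unwind_done(&state);
 *	     unwind_next_frame(&state)) {
 *		addr = unwind_get_return_address(&state);
 *		...
 *	}
 */

/*
 * The .orc_unwind_ip table stores each covered address as a 32-bit offset
 * relative to the table entry itself; orc_ip() converts an entry back to
 * the absolute text address it describes.
 */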
static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *last = ip_table + num_entries - 1;
	int *mid = first, *found = first;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address. Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps. They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;
	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
			  mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long caller;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		caller = (unsigned long)ftrace_regs_call;
	else
		caller = (unsigned long)ftrace_call;

	/* Prevent unlikely recursion */
	if (ip == caller)
		return NULL;

	return orc_find(caller);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry null_orc_entry = {
	.sp_offset = sizeof(long),
	.sp_reg = ORC_REG_SP,
	.bp_reg = ORC_REG_UNDEFINED,
	.type = ORC_TYPE_CALL
};

/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	.type = ORC_TYPE_CALL,
	.sp_reg = ORC_REG_BP,
	.sp_offset = 16,
	.bp_reg = ORC_REG_PREV_SP,
	.bp_offset = -16,
	.end = 0,
};

static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &null_orc_entry;

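	/*
	 * orc_lookup[] is a shortcut into the sorted ORC tables: core kernel
	 * text is split into LOOKUP_BLOCK_SIZE chunks, and each element holds
	 * the ORC table index of the entry covering the start of its chunk.
	 * Two neighboring elements thus bound the small slice of the table
	 * that the binary search below actually has to examine.
	 */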
	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely(idx >= lookup_num_blocks - 1)) {
			orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (init_kernel_text(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

static void orc_sort_swap(void *_a, void *_b, int size)
{
	struct orc_entry *orc_a, *orc_b;
	struct orc_entry orc_tmp;
	int *a = _a, *b = _b, tmp;
	int delta = _b - _a;

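	/*
	 * The ip table entries are self-relative (see orc_ip()), so when two
	 * entries trade places their stored values have to be rebiased by the
	 * byte distance between the two slots.
	 */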
	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	orc_tmp = *orc_a;
	*orc_a = *orc_b;
	*orc_b = orc_tmp;
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	struct orc_entry *orc_a;
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be on the left
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
}

void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

void __init unwind_init(void)
{
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;
	int i;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/*
	 * Note: the orc_unwind and orc_unwind_ip tables were already sorted
	 * at build time by the 'sorttable' tool, so they are ready for binary
	 * search straight away and need no sorting here.
	 */

	/* Initialize the fast lookup table: */
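	/*
	 * Each orc_lookup[i] records the index of the ORC entry covering the
	 * first address of block i; the final element caps the last block at
	 * LOOKUP_STOP_IP so that every block has an upper bound.
	 */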
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks - 1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries,
				 LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
			 LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks - 1] = orc - __start_orc_unwind;

	orc_init = true;
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;

	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
{
	if (unwind_done(state))
		return NULL;

	if (state->regs)
		return &state->regs->ip;

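	/* For call frames, the return address sits one word below SP: */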
	if (state->sp)
		return (unsigned long *)state->sp - 1;

	return NULL;
}

static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
			    size_t len)
{
	struct stack_info *info = &state->stack_info;
	void *addr = (void *)_addr;

	if (!on_stack(info, addr, len) &&
	    (get_stack_info(addr, state->task, info, &state->stack_mask)))
		return false;

	return true;
}

static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
			    unsigned long *val)
{
	if (!stack_access_ok(state, addr, sizeof(long)))
		return false;

	*val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
	return true;
}

static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
			     unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (struct pt_regs *)addr;

	/* x86-32 support will be more complicated due to the &regs->sp hack */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));

	if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

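/*
 * An IRET frame holds only the tail end of pt_regs (ip, cs, flags, sp, ss).
 * Backing the pointer up by IRET_FRAME_OFFSET lets the normal pt_regs field
 * offsets be used to read ip and sp out of it.
 */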
static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
				  unsigned long *ip, unsigned long *sp)
{
	struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;

	if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
		return false;

	*ip = regs->ip;
	*sp = regs->sp;
	return true;
}

/*
 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
 * value from state->regs.
 *
 * Otherwise, if state->regs just points to IRET regs, and the previous frame
 * had full regs, it's safe to get the value from the previous regs. This can
 * happen when early/late IRQ entry code gets interrupted by an NMI.
 */
static bool get_reg(struct unwind_state *state, unsigned int reg_off,
		    unsigned long *val)
{
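	/* pt_regs is laid out as an array of 8-byte registers on x86-64: */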
	unsigned int reg = reg_off / 8;

	if (!state->regs)
		return false;

	if (state->full_regs) {
		*val = ((unsigned long *)state->regs)[reg];
		return true;
	}

	if (state->prev_regs) {
		*val = ((unsigned long *)state->prev_regs)[reg];
		return true;
	}

	return false;
}

bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
	enum stack_type prev_type = state->stack_info.type;
	struct orc_entry *orc;
	bool indirect = false;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	/* End-of-stack check for user tasks: */
	if (state->regs && user_mode(state->regs))
		goto the_end;

	/*
	 * Find the orc_entry associated with the text address.
	 *
	 * Decrement call return addresses by one so they work for sibling
	 * calls and calls to noreturn functions.
	 */
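	/*
	 * state->signal means the frame was created by an interrupt or
	 * exception, in which case state->ip is the faulting instruction
	 * itself rather than a return address and must not be decremented.
	 */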
	orc = orc_find(state->signal ? state->ip : state->ip - 1);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about. This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	}

	/* End-of-stack check for kernel threads: */
	if (orc->sp_reg == ORC_REG_UNDEFINED) {
		if (!orc->end)
			goto err;

		goto the_end;
	}

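	/*
	 * The ORC entry encodes the previous frame's SP as <base reg> +
	 * sp_offset; for the *_INDIRECT bases, the computed address is
	 * dereferenced below to get the actual value.
	 */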
	/* Find the previous frame's stack: */
	switch (orc->sp_reg) {
	case ORC_REG_SP:
		sp = state->sp + orc->sp_offset;
		break;

	case ORC_REG_BP:
		sp = state->bp + orc->sp_offset;
		break;

	case ORC_REG_SP_INDIRECT:
		sp = state->sp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_BP_INDIRECT:
		sp = state->bp + orc->sp_offset;
		indirect = true;
		break;

	case ORC_REG_R10:
		if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
			orc_warn_current("missing R10 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_R13:
		if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
			orc_warn_current("missing R13 value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DI:
		if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
			orc_warn_current("missing RDI value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	case ORC_REG_DX:
		if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
			orc_warn_current("missing RDX value at %pB\n",
					 (void *)state->ip);
			goto err;
		}
		break;

	default:
		orc_warn("unknown SP base reg %d at %pB\n",
			 orc->sp_reg, (void *)state->ip);
		goto err;
	}

	if (indirect) {
		if (!deref_stack_reg(state, sp, &sp))
			goto err;
	}

	/* Find IP, SP and possibly regs: */
	switch (orc->type) {
	case ORC_TYPE_CALL:
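		/*
		 * The previous SP computed above points just past the saved
		 * return address, so the return address itself lives one
		 * word below it.
		 */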
		ip_p = sp - sizeof(long);

		if (!deref_stack_reg(state, ip_p, &state->ip))
			goto err;

		state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						  state->ip, (void *)ip_p);

		state->sp = sp;
		state->regs = NULL;
		state->prev_regs = NULL;
		state->signal = false;
		break;

	case ORC_TYPE_REGS:
		if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}

		state->regs = (struct pt_regs *)sp;
		state->prev_regs = NULL;
		state->full_regs = true;
		state->signal = true;
		break;

	case ORC_TYPE_REGS_IRET:
		if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
			orc_warn_current("can't access iret registers at %pB\n",
					 (void *)orig_ip);
			goto err;
		}

		if (state->full_regs)
			state->prev_regs = state->regs;
		state->regs = (void *)sp - IRET_FRAME_OFFSET;
		state->full_regs = false;
		state->signal = true;
		break;

	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n",
			 orc->type, (void *)orig_ip);
		goto err;
	}

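	/*
	 * BP is tracked as well: some ORC entries base SP on BP, and the
	 * frame-pointer fallback above needs a plausible BP chain to follow.
	 */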
	/* Find BP: */
	switch (orc->bp_reg) {
	case ORC_REG_UNDEFINED:
		if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
			state->bp = tmp;
		break;

	case ORC_REG_PREV_SP:
		if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
			goto err;
		break;

	case ORC_REG_BP:
		if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
			goto err;
		break;

	default:
		orc_warn("unknown BP base reg %d for ip %pB\n",
			 orc->bp_reg, (void *)orig_ip);
		goto err;
	}

	/* Prevent a recursive loop due to bad ORC data: */
	if (state->stack_info.type == prev_type &&
	    on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
	    state->sp <= prev_sp) {
		orc_warn_current("stack going in the wrong direction? at %pB\n",
				 (void *)orig_ip);
		goto err;
	}

	preempt_enable();
	return true;

err:
	state->error = true;

the_end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long *first_frame)
{
	memset(state, 0, sizeof(*state));
	state->task = task;

	if (!orc_init)
		goto err;

	/*
	 * Refuse to unwind the stack of a task while it's executing on another
	 * CPU. This check is racy, but that's ok: the unwinder has other
	 * checks to prevent it from going off the rails.
	 */
	if (task_on_another_cpu(task))
		goto err;

	if (regs) {
		if (user_mode(regs))
			goto the_end;

		state->ip = regs->ip;
		state->sp = regs->sp;
		state->bp = regs->bp;
		state->regs = regs;
		state->full_regs = true;
		state->signal = true;

	} else if (task == current) {
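		/*
		 * Sample our own IP, SP and BP. A RIP-relative LEA with zero
		 * displacement yields the address of the next instruction,
		 * which serves as the starting IP.
		 */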
		asm volatile("lea (%%rip), %0\n\t"
			     "mov %%rsp, %1\n\t"
			     "mov %%rbp, %2\n\t"
			     : "=r" (state->ip), "=r" (state->sp),
			       "=r" (state->bp));

	} else {
		struct inactive_task_frame *frame = (void *)task->thread.sp;

		state->sp = task->thread.sp;
		state->bp = READ_ONCE_NOCHECK(frame->bp);
		state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
	}

	if (get_stack_info((unsigned long *)state->sp, state->task,
			   &state->stack_info, &state->stack_mask)) {
		/*
		 * We weren't on a valid stack. It's possible that
		 * we overflowed a valid stack into a guard page.
		 * See if the next page up is valid so that we can
		 * generate some kind of backtrace if this happens.
		 */
		void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
		state->error = true;
		if (get_stack_info(next_page, state->task, &state->stack_info,
				   &state->stack_mask))
			return;
	}

	/*
	 * The caller can provide the address of the first frame directly
	 * (first_frame) or indirectly (regs->sp) to indicate which stack frame
	 * to start unwinding at. Skip ahead until we reach it.
	 */

	/* When starting from regs, skip the regs frame: */
	if (regs) {
		unwind_next_frame(state);
		return;
	}

	/* Otherwise, skip ahead to the user-specified starting frame: */
	while (!unwind_done(state) &&
	       (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
		state->sp < (unsigned long)first_frame))
		unwind_next_frame(state);

	return;

err:
	state->error = true;
the_end:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(__unwind_start);