]> git.ipfire.org Git - people/pmueller/ipfire-2.x.git/blob - src/patches/suse-2.6.27.25/patches.suse/stack-unwind
Updated xen patches taken from suse.
[people/pmueller/ipfire-2.x.git] / src / patches / suse-2.6.27.25 / patches.suse / stack-unwind
1 Subject: DWARF2 EH-frame based stack unwinding
2 From: jbeulich@novell.com
3 Patch-mainline: no
4
5 ---
6 Makefile | 5
7 arch/x86/Kconfig | 2
8 arch/x86/Makefile | 2
9 arch/x86/kernel/entry_32.S | 32
10 arch/x86/kernel/entry_64.S | 33
11 arch/x86/kernel/traps_32.c | 82 ++
12 arch/x86/kernel/traps_64.c | 85 ++
13 include/asm-generic/vmlinux.lds.h | 22
14 include/asm-x86/unwind.h | 159 ++++
15 include/linux/unwind.h | 63 +
16 kernel/Makefile | 1
17 kernel/unwind.c | 1303 ++++++++++++++++++++++++++++++++++++++
18 lib/Kconfig.debug | 18
19 13 files changed, 1804 insertions(+), 3 deletions(-)
20
21 --- a/Makefile
22 +++ b/Makefile
23 @@ -553,6 +553,11 @@ else
24 KBUILD_CFLAGS += -fomit-frame-pointer
25 endif
26
27 +ifdef CONFIG_UNWIND_INFO
28 +KBUILD_CFLAGS += -fasynchronous-unwind-tables
29 +LDFLAGS_vmlinux += --eh-frame-hdr
30 +endif
31 +
32 ifdef CONFIG_DEBUG_INFO
33 KBUILD_CFLAGS += -g
34 KBUILD_AFLAGS += -gdwarf-2
35 --- a/arch/x86/Kconfig
36 +++ b/arch/x86/Kconfig
37 @@ -373,7 +373,7 @@ config X86_RDC321X
38 config SCHED_NO_NO_OMIT_FRAME_POINTER
39 def_bool y
40 prompt "Single-depth WCHAN output"
41 - depends on X86_32
42 + depends on X86_32 && !STACK_UNWIND
43 help
44 Calculate simpler /proc/<PID>/wchan values. If this option
45 is disabled then wchan values will recurse back to the
46 --- a/arch/x86/Makefile
47 +++ b/arch/x86/Makefile
48 @@ -98,7 +98,9 @@ KBUILD_CFLAGS += -pipe
49 # Workaround for a gcc prelease that unfortunately was shipped in a suse release
50 KBUILD_CFLAGS += -Wno-sign-compare
51 #
52 +ifneq ($(CONFIG_UNWIND_INFO),y)
53 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
54 +endif
55 # prevent gcc from generating any FP code by mistake
56 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
57
58 --- a/arch/x86/kernel/entry_32.S
59 +++ b/arch/x86/kernel/entry_32.S
60 @@ -1067,6 +1067,38 @@ ENTRY(spurious_interrupt_bug)
61 CFI_ENDPROC
62 END(spurious_interrupt_bug)
63
64 +#ifdef CONFIG_STACK_UNWIND
65 +ENTRY(arch_unwind_init_running)
66 + CFI_STARTPROC
67 + movl 4(%esp), %edx
68 + movl (%esp), %ecx
69 + leal 4(%esp), %eax
70 + movl %ebx, PT_EBX(%edx)
71 + xorl %ebx, %ebx
72 + movl %ebx, PT_ECX(%edx)
73 + movl %ebx, PT_EDX(%edx)
74 + movl %esi, PT_ESI(%edx)
75 + movl %edi, PT_EDI(%edx)
76 + movl %ebp, PT_EBP(%edx)
77 + movl %ebx, PT_EAX(%edx)
78 + movl $__USER_DS, PT_DS(%edx)
79 + movl $__USER_DS, PT_ES(%edx)
80 + movl $__KERNEL_PERCPU, PT_FS(%edx)
81 + movl %ebx, PT_ORIG_EAX(%edx)
82 + movl %ecx, PT_EIP(%edx)
83 + movl 12(%esp), %ecx
84 + movl $__KERNEL_CS, PT_CS(%edx)
85 + movl %ebx, PT_EFLAGS(%edx)
86 + movl %eax, PT_OLDESP(%edx)
87 + movl 8(%esp), %eax
88 + movl %ecx, 8(%esp)
89 + movl PT_EBX(%edx), %ebx
90 + movl $__KERNEL_DS, PT_OLDSS(%edx)
91 + jmpl *%eax
92 + CFI_ENDPROC
93 +ENDPROC(arch_unwind_init_running)
94 +#endif
95 +
96 ENTRY(kernel_thread_helper)
97 pushl $0 # fake return address for unwinder
98 CFI_STARTPROC
99 --- a/arch/x86/kernel/entry_64.S
100 +++ b/arch/x86/kernel/entry_64.S
101 @@ -1368,6 +1368,39 @@ KPROBE_ENTRY(ignore_sysret)
102 CFI_ENDPROC
103 ENDPROC(ignore_sysret)
104
105 +#ifdef CONFIG_STACK_UNWIND
106 +ENTRY(arch_unwind_init_running)
107 + CFI_STARTPROC
108 + movq %r15, R15(%rdi)
109 + movq %r14, R14(%rdi)
110 + xchgq %rsi, %rdx
111 + movq %r13, R13(%rdi)
112 + movq %r12, R12(%rdi)
113 + xorl %eax, %eax
114 + movq %rbp, RBP(%rdi)
115 + movq %rbx, RBX(%rdi)
116 + movq (%rsp), %rcx
117 + movq %rax, R11(%rdi)
118 + movq %rax, R10(%rdi)
119 + movq %rax, R9(%rdi)
120 + movq %rax, R8(%rdi)
121 + movq %rax, RAX(%rdi)
122 + movq %rax, RCX(%rdi)
123 + movq %rax, RDX(%rdi)
124 + movq %rax, RSI(%rdi)
125 + movq %rax, RDI(%rdi)
126 + movq %rax, ORIG_RAX(%rdi)
127 + movq %rcx, RIP(%rdi)
128 + leaq 8(%rsp), %rcx
129 + movq $__KERNEL_CS, CS(%rdi)
130 + movq %rax, EFLAGS(%rdi)
131 + movq %rcx, RSP(%rdi)
132 + movq $__KERNEL_DS, SS(%rdi)
133 + jmpq *%rdx
134 + CFI_ENDPROC
135 +ENDPROC(arch_unwind_init_running)
136 +#endif
137 +
138 #ifdef CONFIG_XEN
139 ENTRY(xen_hypervisor_callback)
140 zeroentry xen_do_hypervisor_callback
141 --- a/arch/x86/kernel/traps_32.c
142 +++ b/arch/x86/kernel/traps_32.c
143 @@ -85,6 +85,11 @@ gate_desc idt_table[256]
144 int panic_on_unrecovered_nmi;
145 int kstack_depth_to_print = 24;
146 static unsigned int code_bytes = 64;
147 +#ifdef CONFIG_STACK_UNWIND
148 +static int call_trace = 1;
149 +#else
150 +#define call_trace (-1)
151 +#endif
152 static int ignore_nmis;
153 static int die_counter;
154
155 @@ -155,6 +160,33 @@ print_context_stack(struct thread_info *
156 return bp;
157 }
158
159 +struct ops_and_data {
160 + const struct stacktrace_ops *ops;
161 + void *data;
162 +};
163 +
164 +static asmlinkage int
165 +dump_trace_unwind(struct unwind_frame_info *info, void *data)
166 +{
167 + struct ops_and_data *oad = (struct ops_and_data *)data;
168 + int n = 0;
169 + unsigned long sp = UNW_SP(info);
170 +
171 + if (arch_unw_user_mode(info))
172 + return -1;
173 + while (unwind(info) == 0 && UNW_PC(info)) {
174 + n++;
175 + oad->ops->address(oad->data, UNW_PC(info), 1);
176 + if (arch_unw_user_mode(info))
177 + break;
178 + if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
179 + && sp > UNW_SP(info))
180 + break;
181 + sp = UNW_SP(info);
182 + }
183 + return n;
184 +}
185 +
186 void dump_trace(struct task_struct *task, struct pt_regs *regs,
187 unsigned long *stack, unsigned long bp,
188 const struct stacktrace_ops *ops, void *data)
189 @@ -162,6 +194,40 @@ void dump_trace(struct task_struct *task
190 if (!task)
191 task = current;
192
193 + if (call_trace >= 0) {
194 + int unw_ret = 0;
195 + struct unwind_frame_info info;
196 + struct ops_and_data oad = { .ops = ops, .data = data };
197 +
198 + if (regs) {
199 + if (unwind_init_frame_info(&info, task, regs) == 0)
200 + unw_ret = dump_trace_unwind(&info, &oad);
201 + } else if (task == current)
202 + unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
203 + else {
204 + if (unwind_init_blocked(&info, task) == 0)
205 + unw_ret = dump_trace_unwind(&info, &oad);
206 + }
207 + if (unw_ret > 0) {
208 + if (call_trace == 1 && !arch_unw_user_mode(&info)) {
209 + ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
210 + UNW_PC(&info));
211 + if (UNW_SP(&info) >= PAGE_OFFSET) {
212 + ops->warning(data, "Leftover inexact backtrace:\n");
213 + stack = (void *)UNW_SP(&info);
214 + if (!stack)
215 + return;
216 + bp = UNW_FP(&info);
217 + } else
218 + ops->warning(data, "Full inexact backtrace again:\n");
219 + } else if (call_trace >= 1)
220 + return;
221 + else
222 + ops->warning(data, "Full inexact backtrace again:\n");
223 + } else
224 + ops->warning(data, "Inexact backtrace:\n");
225 + }
226 +
227 if (!stack) {
228 unsigned long dummy;
229 stack = &dummy;
230 @@ -1302,3 +1368,19 @@ static int __init code_bytes_setup(char
231 return 1;
232 }
233 __setup("code_bytes=", code_bytes_setup);
234 +
235 +#ifdef CONFIG_STACK_UNWIND
236 +static int __init call_trace_setup(char *s)
237 +{
238 + if (strcmp(s, "old") == 0)
239 + call_trace = -1;
240 + else if (strcmp(s, "both") == 0)
241 + call_trace = 0;
242 + else if (strcmp(s, "newfallback") == 0)
243 + call_trace = 1;
244 + else if (strcmp(s, "new") == 0)
245 + call_trace = 2;
246 + return 1;
247 +}
248 +__setup("call_trace=", call_trace_setup);
249 +#endif
250 --- a/arch/x86/kernel/traps_64.c
251 +++ b/arch/x86/kernel/traps_64.c
252 @@ -58,6 +58,11 @@
253 int panic_on_unrecovered_nmi;
254 int kstack_depth_to_print = 12;
255 static unsigned int code_bytes = 64;
256 +#ifdef CONFIG_STACK_UNWIND
257 +static int call_trace = 1;
258 +#else
259 +#define call_trace (-1)
260 +#endif
261 static int ignore_nmis;
262 static int die_counter;
263
264 @@ -162,6 +167,32 @@ static unsigned long *in_exception_stack
265 return NULL;
266 }
267
268 +struct ops_and_data {
269 + const struct stacktrace_ops *ops;
270 + void *data;
271 +};
272 +
273 +static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
274 +{
275 + struct ops_and_data *oad = (struct ops_and_data *)context;
276 + int n = 0;
277 + unsigned long sp = UNW_SP(info);
278 +
279 + if (arch_unw_user_mode(info))
280 + return -1;
281 + while (unwind(info) == 0 && UNW_PC(info)) {
282 + n++;
283 + oad->ops->address(oad->data, UNW_PC(info), 1);
284 + if (arch_unw_user_mode(info))
285 + break;
286 + if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
287 + && sp > UNW_SP(info))
288 + break;
289 + sp = UNW_SP(info);
290 + }
291 + return n;
292 +}
293 +
294 /*
295 * x86-64 can have up to three kernel stacks:
296 * process stack
297 @@ -226,6 +257,42 @@ void dump_trace(struct task_struct *task
298 if (!task)
299 task = current;
300
301 + if (call_trace >= 0) {
302 + int unw_ret = 0;
303 + struct unwind_frame_info info;
304 + struct ops_and_data oad = { .ops = ops, .data = data };
305 +
306 + if (regs) {
307 + if (unwind_init_frame_info(&info, task, regs) == 0)
308 + unw_ret = dump_trace_unwind(&info, &oad);
309 + } else if (task == current)
310 + unw_ret = unwind_init_running(&info, dump_trace_unwind, &oad);
311 + else {
312 + if (unwind_init_blocked(&info, task) == 0)
313 + unw_ret = dump_trace_unwind(&info, &oad);
314 + }
315 + if (unw_ret > 0) {
316 + if (call_trace == 1 && !arch_unw_user_mode(&info)) {
317 + ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
318 + UNW_PC(&info));
319 + if ((long)UNW_SP(&info) < 0) {
320 + ops->warning(data, "Leftover inexact backtrace:\n");
321 + stack = (unsigned long *)UNW_SP(&info);
322 + if (!stack) {
323 + put_cpu();
324 + return;
325 + }
326 + } else
327 + ops->warning(data, "Full inexact backtrace again:\n");
328 + } else if (call_trace >= 1) {
329 + put_cpu();
330 + return;
331 + } else
332 + ops->warning(data, "Full inexact backtrace again:\n");
333 + } else
334 + ops->warning(data, "Inexact backtrace:\n");
335 + }
336 +
337 if (!stack) {
338 unsigned long dummy;
339 stack = &dummy;
340 @@ -1214,3 +1281,21 @@ static int __init code_bytes_setup(char
341 return 1;
342 }
343 __setup("code_bytes=", code_bytes_setup);
344 +
345 +#ifdef CONFIG_STACK_UNWIND
346 +static int __init call_trace_setup(char *s)
347 +{
348 + if (!s)
349 + return -EINVAL;
350 + if (strcmp(s, "old") == 0)
351 + call_trace = -1;
352 + else if (strcmp(s, "both") == 0)
353 + call_trace = 0;
354 + else if (strcmp(s, "newfallback") == 0)
355 + call_trace = 1;
356 + else if (strcmp(s, "new") == 0)
357 + call_trace = 2;
358 + return 0;
359 +}
360 +early_param("call_trace", call_trace_setup);
361 +#endif
362 --- a/include/asm-generic/vmlinux.lds.h
363 +++ b/include/asm-generic/vmlinux.lds.h
364 @@ -196,6 +196,8 @@
365 MEM_KEEP(exit.rodata) \
366 } \
367 \
368 + EH_FRAME \
369 + \
370 /* Built-in module parameters. */ \
371 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
372 VMLINUX_SYMBOL(__start___param) = .; \
373 @@ -299,6 +301,26 @@
374 CPU_DISCARD(exit.text) \
375 MEM_DISCARD(exit.text)
376
377 +#ifdef CONFIG_STACK_UNWIND
378 +#define EH_FRAME \
379 + /* Unwind data binary search table */ \
380 + . = ALIGN(8); \
381 + .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \
382 + VMLINUX_SYMBOL(__start_unwind_hdr) = .; \
383 + *(.eh_frame_hdr) \
384 + VMLINUX_SYMBOL(__end_unwind_hdr) = .; \
385 + } \
386 + /* Unwind data */ \
387 + . = ALIGN(8); \
388 + .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \
389 + VMLINUX_SYMBOL(__start_unwind) = .; \
390 + *(.eh_frame) \
391 + VMLINUX_SYMBOL(__end_unwind) = .; \
392 + }
393 +#else
394 +#define EH_FRAME
395 +#endif
396 +
397 /* DWARF debug sections.
398 Symbols in the DWARF debugging sections are relative to
399 the beginning of the section so we begin them at 0. */
400 --- a/include/asm-x86/unwind.h
401 +++ b/include/asm-x86/unwind.h
402 @@ -1,6 +1,163 @@
403 #ifndef _ASM_X86_UNWIND_H
404 #define _ASM_X86_UNWIND_H
405
406 +/*
407 + * Copyright (C) 2002-2007 Novell, Inc.
408 + * Jan Beulich <jbeulich@novell.com>
409 + * This code is released under version 2 of the GNU GPL.
410 + */
411 +
412 +#ifdef CONFIG_STACK_UNWIND
413 +
414 +#include <linux/sched.h>
415 +#include <linux/uaccess.h>
416 +#include <asm/ptrace.h>
417 +
418 +struct unwind_frame_info
419 +{
420 + struct pt_regs regs;
421 + struct task_struct *task;
422 + unsigned call_frame:1;
423 +};
424 +
425 +#ifdef CONFIG_X86_64
426 +
427 +#include <asm/vsyscall.h>
428 +
429 +#define UNW_PC(frame) (frame)->regs.ip
430 +#define UNW_SP(frame) (frame)->regs.sp
431 +#ifdef CONFIG_FRAME_POINTER
432 +#define UNW_FP(frame) (frame)->regs.bp
433 +#define FRAME_RETADDR_OFFSET 8
434 +#define FRAME_LINK_OFFSET 0
435 +#define STACK_BOTTOM(tsk) (((tsk)->thread.sp0 - 1) & ~(THREAD_SIZE - 1))
436 +#define TSK_STACK_TOP(tsk) ((tsk)->thread.sp0)
437 +#endif
438 +/* Might need to account for the special exception and interrupt handling
439 + stacks here, since normally
440 + EXCEPTION_STACK_ORDER < THREAD_ORDER < IRQSTACK_ORDER,
441 + but the construct is needed only for getting across the stack switch to
442 + the interrupt stack - thus considering the IRQ stack itself is unnecessary,
443 + and the overhead of comparing against all exception handling stacks seems
444 + not desirable. */
445 +#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
446 +
447 +#define UNW_REGISTER_INFO \
448 + PTREGS_INFO(ax), \
449 + PTREGS_INFO(dx), \
450 + PTREGS_INFO(cx), \
451 + PTREGS_INFO(bx), \
452 + PTREGS_INFO(si), \
453 + PTREGS_INFO(di), \
454 + PTREGS_INFO(bp), \
455 + PTREGS_INFO(sp), \
456 + PTREGS_INFO(r8), \
457 + PTREGS_INFO(r9), \
458 + PTREGS_INFO(r10), \
459 + PTREGS_INFO(r11), \
460 + PTREGS_INFO(r12), \
461 + PTREGS_INFO(r13), \
462 + PTREGS_INFO(r14), \
463 + PTREGS_INFO(r15), \
464 + PTREGS_INFO(ip)
465 +
466 +#else
467 +
468 +#include <asm/fixmap.h>
469 +
470 +#define UNW_PC(frame) (frame)->regs.ip
471 +#define UNW_SP(frame) (frame)->regs.sp
472 +#ifdef CONFIG_FRAME_POINTER
473 +#define UNW_FP(frame) (frame)->regs.bp
474 +#define FRAME_RETADDR_OFFSET 4
475 +#define FRAME_LINK_OFFSET 0
476 +#define STACK_BOTTOM(tsk) STACK_LIMIT((tsk)->thread.sp0)
477 +#define TSK_STACK_TOP(tsk) ((tsk)->thread.sp0)
478 +#else
479 +#define UNW_FP(frame) ((void)(frame), 0UL)
480 +#endif
481 +#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
482 +
483 +#define UNW_REGISTER_INFO \
484 + PTREGS_INFO(ax), \
485 + PTREGS_INFO(cx), \
486 + PTREGS_INFO(dx), \
487 + PTREGS_INFO(bx), \
488 + PTREGS_INFO(sp), \
489 + PTREGS_INFO(bp), \
490 + PTREGS_INFO(si), \
491 + PTREGS_INFO(di), \
492 + PTREGS_INFO(ip)
493 +
494 +#endif
495 +
496 +#define UNW_DEFAULT_RA(raItem, dataAlign) \
497 + ((raItem).where == Memory && \
498 + !((raItem).value * (dataAlign) + sizeof(void *)))
499 +
500 +static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
501 + /*const*/ struct pt_regs *regs)
502 +{
503 +#ifdef CONFIG_X86_64
504 + info->regs = *regs;
505 +#else
506 + if (user_mode_vm(regs))
507 + info->regs = *regs;
508 + else {
509 + memcpy(&info->regs, regs, offsetof(struct pt_regs, sp));
510 + info->regs.sp = (unsigned long)&regs->sp;
511 + info->regs.ss = __KERNEL_DS;
512 + }
513 +#endif
514 +}
515 +
516 +static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
517 +{
518 +#ifdef CONFIG_X86_64
519 + extern const char thread_return[];
520 +
521 + memset(&info->regs, 0, sizeof(info->regs));
522 + info->regs.ip = (unsigned long)thread_return;
523 + info->regs.cs = __KERNEL_CS;
524 + probe_kernel_address(info->task->thread.sp, info->regs.bp);
525 + info->regs.sp = info->task->thread.sp;
526 + info->regs.ss = __KERNEL_DS;
527 +#else
528 + memset(&info->regs, 0, sizeof(info->regs));
529 + info->regs.ip = info->task->thread.ip;
530 + info->regs.cs = __KERNEL_CS;
531 + probe_kernel_address(info->task->thread.sp, info->regs.bp);
532 + info->regs.sp = info->task->thread.sp;
533 + info->regs.ss = __KERNEL_DS;
534 + info->regs.ds = __USER_DS;
535 + info->regs.es = __USER_DS;
536 +#endif
537 +}
538 +
539 +extern asmlinkage int
540 +arch_unwind_init_running(struct unwind_frame_info *,
541 + asmlinkage int (*callback)(struct unwind_frame_info *,
542 + void *arg),
543 + void *arg);
544 +
545 +static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info)
546 +{
547 +#ifdef CONFIG_X86_64
548 + return user_mode(&info->regs)
549 + || (long)info->regs.ip >= 0
550 + || (info->regs.ip >= VSYSCALL_START && info->regs.ip < VSYSCALL_END)
551 + || (long)info->regs.sp >= 0;
552 +#else
553 + return user_mode_vm(&info->regs)
554 + || info->regs.ip < PAGE_OFFSET
555 + || (info->regs.ip >= __fix_to_virt(FIX_VDSO)
556 + && info->regs.ip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
557 + || info->regs.sp < PAGE_OFFSET;
558 +#endif
559 +}
560 +
561 +#else
562 +
563 #define UNW_PC(frame) ((void)(frame), 0UL)
564 #define UNW_SP(frame) ((void)(frame), 0UL)
565 #define UNW_FP(frame) ((void)(frame), 0UL)
566 @@ -10,4 +167,6 @@ static inline int arch_unw_user_mode(con
567 return 0;
568 }
569
570 +#endif
571 +
572 #endif /* _ASM_X86_UNWIND_H */
573 --- a/include/linux/unwind.h
574 +++ b/include/linux/unwind.h
575 @@ -14,6 +14,63 @@
576
577 struct module;
578
579 +#ifdef CONFIG_STACK_UNWIND
580 +
581 +#include <asm/unwind.h>
582 +
583 +#ifndef ARCH_UNWIND_SECTION_NAME
584 +#define ARCH_UNWIND_SECTION_NAME ".eh_frame"
585 +#endif
586 +
587 +/*
588 + * Initialize unwind support.
589 + */
590 +extern void unwind_init(void);
591 +extern void unwind_setup(void);
592 +
593 +#ifdef CONFIG_MODULES
594 +
595 +extern void *unwind_add_table(struct module *,
596 + const void *table_start,
597 + unsigned long table_size);
598 +
599 +extern void unwind_remove_table(void *handle, int init_only);
600 +
601 +#endif
602 +
603 +extern int unwind_init_frame_info(struct unwind_frame_info *,
604 + struct task_struct *,
605 + /*const*/ struct pt_regs *);
606 +
607 +/*
608 + * Prepare to unwind a blocked task.
609 + */
610 +extern int unwind_init_blocked(struct unwind_frame_info *,
611 + struct task_struct *);
612 +
613 +/*
614 + * Prepare to unwind the currently running thread.
615 + */
616 +extern int unwind_init_running(struct unwind_frame_info *,
617 + asmlinkage int (*callback)(struct unwind_frame_info *,
618 + void *arg),
619 + void *arg);
620 +
621 +/*
622 + * Unwind to previous to frame. Returns 0 if successful, negative
623 + * number in case of an error.
624 + */
625 +extern int unwind(struct unwind_frame_info *);
626 +
627 +/*
628 + * Unwind until the return pointer is in user-land (or until an error
629 + * occurs). Returns 0 if successful, negative number in case of
630 + * error.
631 + */
632 +extern int unwind_to_user(struct unwind_frame_info *);
633 +
634 +#else
635 +
636 struct unwind_frame_info {};
637
638 static inline void unwind_init(void) {}
639 @@ -28,12 +85,12 @@ static inline void *unwind_add_table(str
640 return NULL;
641 }
642
643 +#endif
644 +
645 static inline void unwind_remove_table(void *handle, int init_only)
646 {
647 }
648
649 -#endif
650 -
651 static inline int unwind_init_frame_info(struct unwind_frame_info *info,
652 struct task_struct *tsk,
653 const struct pt_regs *regs)
654 @@ -65,4 +122,6 @@ static inline int unwind_to_user(struct
655 return -ENOSYS;
656 }
657
658 +#endif
659 +
660 #endif /* _LINUX_UNWIND_H */
661 --- a/kernel/Makefile
662 +++ b/kernel/Makefile
663 @@ -46,6 +46,7 @@ obj-$(CONFIG_PROVE_LOCKING) += spinlock.
664 obj-$(CONFIG_UID16) += uid16.o
665 obj-$(CONFIG_MODULES) += module.o
666 obj-$(CONFIG_KALLSYMS) += kallsyms.o
667 +obj-$(CONFIG_STACK_UNWIND) += unwind.o
668 obj-$(CONFIG_PM) += power/
669 obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
670 obj-$(CONFIG_KEXEC) += kexec.o
671 --- /dev/null
672 +++ b/kernel/unwind.c
673 @@ -0,0 +1,1303 @@
674 +/*
675 + * Copyright (C) 2002-2006 Novell, Inc.
676 + * Jan Beulich <jbeulich@novell.com>
677 + * This code is released under version 2 of the GNU GPL.
678 + *
679 + * A simple API for unwinding kernel stacks. This is used for
680 + * debugging and error reporting purposes. The kernel doesn't need
681 + * full-blown stack unwinding with all the bells and whistles, so there
682 + * is not much point in implementing the full Dwarf2 unwind API.
683 + */
684 +
685 +#include <linux/unwind.h>
686 +#include <linux/module.h>
687 +#include <linux/bootmem.h>
688 +#include <linux/sort.h>
689 +#include <linux/stop_machine.h>
690 +#include <linux/uaccess.h>
691 +#include <asm/sections.h>
692 +#include <asm/unaligned.h>
693 +
694 +extern const char __start_unwind[], __end_unwind[];
695 +extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];
696 +
697 +#define MAX_STACK_DEPTH 8
698 +
699 +#define EXTRA_INFO(f) { \
700 + BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
701 + % FIELD_SIZEOF(struct unwind_frame_info, f)) \
702 + + offsetof(struct unwind_frame_info, f) \
703 + / FIELD_SIZEOF(struct unwind_frame_info, f), \
704 + FIELD_SIZEOF(struct unwind_frame_info, f) \
705 + }
706 +#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
707 +
708 +static const struct {
709 + unsigned offs:BITS_PER_LONG / 2;
710 + unsigned width:BITS_PER_LONG / 2;
711 +} reg_info[] = {
712 + UNW_REGISTER_INFO
713 +};
714 +
715 +#undef PTREGS_INFO
716 +#undef EXTRA_INFO
717 +
718 +#ifndef REG_INVALID
719 +#define REG_INVALID(r) (reg_info[r].width == 0)
720 +#endif
721 +
722 +#define DW_CFA_nop 0x00
723 +#define DW_CFA_set_loc 0x01
724 +#define DW_CFA_advance_loc1 0x02
725 +#define DW_CFA_advance_loc2 0x03
726 +#define DW_CFA_advance_loc4 0x04
727 +#define DW_CFA_offset_extended 0x05
728 +#define DW_CFA_restore_extended 0x06
729 +#define DW_CFA_undefined 0x07
730 +#define DW_CFA_same_value 0x08
731 +#define DW_CFA_register 0x09
732 +#define DW_CFA_remember_state 0x0a
733 +#define DW_CFA_restore_state 0x0b
734 +#define DW_CFA_def_cfa 0x0c
735 +#define DW_CFA_def_cfa_register 0x0d
736 +#define DW_CFA_def_cfa_offset 0x0e
737 +#define DW_CFA_def_cfa_expression 0x0f
738 +#define DW_CFA_expression 0x10
739 +#define DW_CFA_offset_extended_sf 0x11
740 +#define DW_CFA_def_cfa_sf 0x12
741 +#define DW_CFA_def_cfa_offset_sf 0x13
742 +#define DW_CFA_val_offset 0x14
743 +#define DW_CFA_val_offset_sf 0x15
744 +#define DW_CFA_val_expression 0x16
745 +#define DW_CFA_lo_user 0x1c
746 +#define DW_CFA_GNU_window_save 0x2d
747 +#define DW_CFA_GNU_args_size 0x2e
748 +#define DW_CFA_GNU_negative_offset_extended 0x2f
749 +#define DW_CFA_hi_user 0x3f
750 +
751 +#define DW_EH_PE_FORM 0x07
752 +#define DW_EH_PE_native 0x00
753 +#define DW_EH_PE_leb128 0x01
754 +#define DW_EH_PE_data2 0x02
755 +#define DW_EH_PE_data4 0x03
756 +#define DW_EH_PE_data8 0x04
757 +#define DW_EH_PE_signed 0x08
758 +#define DW_EH_PE_ADJUST 0x70
759 +#define DW_EH_PE_abs 0x00
760 +#define DW_EH_PE_pcrel 0x10
761 +#define DW_EH_PE_textrel 0x20
762 +#define DW_EH_PE_datarel 0x30
763 +#define DW_EH_PE_funcrel 0x40
764 +#define DW_EH_PE_aligned 0x50
765 +#define DW_EH_PE_indirect 0x80
766 +#define DW_EH_PE_omit 0xff
767 +
768 +typedef unsigned long uleb128_t;
769 +typedef signed long sleb128_t;
770 +#define sleb128abs __builtin_labs
771 +
772 +static struct unwind_table {
773 + struct {
774 + unsigned long pc;
775 + unsigned long range;
776 + } core, init;
777 + const void *address;
778 + unsigned long size;
779 + const unsigned char *header;
780 + unsigned long hdrsz;
781 + struct unwind_table *link;
782 + const char *name;
783 +} root_table;
784 +
785 +struct unwind_item {
786 + enum item_location {
787 + Nowhere,
788 + Memory,
789 + Register,
790 + Value
791 + } where;
792 + uleb128_t value;
793 +};
794 +
795 +struct unwind_state {
796 + uleb128_t loc, org;
797 + const u8 *cieStart, *cieEnd;
798 + uleb128_t codeAlign;
799 + sleb128_t dataAlign;
800 + struct cfa {
801 + uleb128_t reg, offs;
802 + } cfa;
803 + struct unwind_item regs[ARRAY_SIZE(reg_info)];
804 + unsigned stackDepth:8;
805 + unsigned version:8;
806 + const u8 *label;
807 + const u8 *stack[MAX_STACK_DEPTH];
808 +};
809 +
810 +static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
811 +
812 +static unsigned unwind_debug;
813 +static int __init unwind_debug_setup(char *s)
814 +{
815 + unwind_debug = simple_strtoul(s, NULL, 0);
816 + return 1;
817 +}
818 +__setup("unwind_debug=", unwind_debug_setup);
819 +#define dprintk(lvl, fmt, args...) \
820 + ((void)(lvl > unwind_debug \
821 + || printk(KERN_DEBUG "unwind: " fmt "\n", ##args)))
822 +
823 +static struct unwind_table *find_table(unsigned long pc)
824 +{
825 + struct unwind_table *table;
826 +
827 + for (table = &root_table; table; table = table->link)
828 + if ((pc >= table->core.pc
829 + && pc < table->core.pc + table->core.range)
830 + || (pc >= table->init.pc
831 + && pc < table->init.pc + table->init.range))
832 + break;
833 +
834 + return table;
835 +}
836 +
837 +static unsigned long read_pointer(const u8 **pLoc,
838 + const void *end,
839 + signed ptrType,
840 + unsigned long text_base,
841 + unsigned long data_base);
842 +
843 +static void init_unwind_table(struct unwind_table *table,
844 + const char *name,
845 + const void *core_start,
846 + unsigned long core_size,
847 + const void *init_start,
848 + unsigned long init_size,
849 + const void *table_start,
850 + unsigned long table_size,
851 + const u8 *header_start,
852 + unsigned long header_size)
853 +{
854 + const u8 *ptr = header_start + 4;
855 + const u8 *end = header_start + header_size;
856 +
857 + table->core.pc = (unsigned long)core_start;
858 + table->core.range = core_size;
859 + table->init.pc = (unsigned long)init_start;
860 + table->init.range = init_size;
861 + table->address = table_start;
862 + table->size = table_size;
863 + /* See if the linker provided table looks valid. */
864 + if (header_size <= 4
865 + || header_start[0] != 1
866 + || (void *)read_pointer(&ptr, end, header_start[1], 0, 0)
867 + != table_start
868 + || !read_pointer(&ptr, end, header_start[2], 0, 0)
869 + || !read_pointer(&ptr, end, header_start[3], 0,
870 + (unsigned long)header_start)
871 + || !read_pointer(&ptr, end, header_start[3], 0,
872 + (unsigned long)header_start))
873 + header_start = NULL;
874 + table->hdrsz = header_size;
875 + smp_wmb();
876 + table->header = header_start;
877 + table->link = NULL;
878 + table->name = name;
879 +}
880 +
881 +void __init unwind_init(void)
882 +{
883 + init_unwind_table(&root_table, "kernel",
884 + _text, _end - _text,
885 + NULL, 0,
886 + __start_unwind, __end_unwind - __start_unwind,
887 + __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);
888 +}
889 +
890 +static const u32 bad_cie, not_fde;
891 +static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
892 +static signed fde_pointer_type(const u32 *cie);
893 +
894 +struct eh_frame_hdr_table_entry {
895 + unsigned long start, fde;
896 +};
897 +
898 +static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
899 +{
900 + const struct eh_frame_hdr_table_entry *e1 = p1;
901 + const struct eh_frame_hdr_table_entry *e2 = p2;
902 +
903 + return (e1->start > e2->start) - (e1->start < e2->start);
904 +}
905 +
906 +static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
907 +{
908 + struct eh_frame_hdr_table_entry *e1 = p1;
909 + struct eh_frame_hdr_table_entry *e2 = p2;
910 + unsigned long v;
911 +
912 + v = e1->start;
913 + e1->start = e2->start;
914 + e2->start = v;
915 + v = e1->fde;
916 + e1->fde = e2->fde;
917 + e2->fde = v;
918 +}
919 +
920 +static void __init setup_unwind_table(struct unwind_table *table,
921 + void *(*alloc)(unsigned long))
922 +{
923 + const u8 *ptr;
924 + unsigned long tableSize = table->size, hdrSize;
925 + unsigned n;
926 + const u32 *fde;
927 + struct {
928 + u8 version;
929 + u8 eh_frame_ptr_enc;
930 + u8 fde_count_enc;
931 + u8 table_enc;
932 + unsigned long eh_frame_ptr;
933 + unsigned int fde_count;
934 + struct eh_frame_hdr_table_entry table[];
935 + } __attribute__((__packed__)) *header;
936 +
937 + if (table->header)
938 + return;
939 +
940 + if (table->hdrsz)
941 + printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n",
942 + table->name);
943 +
944 + if (tableSize & (sizeof(*fde) - 1))
945 + return;
946 +
947 + for (fde = table->address, n = 0;
948 + tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
949 + tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
950 + const u32 *cie = cie_for_fde(fde, table);
951 + signed ptrType;
952 +
953 + if (cie == &not_fde)
954 + continue;
955 + if (cie == NULL
956 + || cie == &bad_cie
957 + || (ptrType = fde_pointer_type(cie)) < 0)
958 + return;
959 + ptr = (const u8 *)(fde + 2);
960 + if (!read_pointer(&ptr,
961 + (const u8 *)(fde + 1) + *fde,
962 + ptrType, 0, 0))
963 + return;
964 + ++n;
965 + }
966 +
967 + if (tableSize || !n)
968 + return;
969 +
970 + hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
971 + + 2 * n * sizeof(unsigned long);
972 + dprintk(2, "Binary lookup table size for %s: %lu bytes", table->name, hdrSize);
973 + header = alloc(hdrSize);
974 + if (!header)
975 + return;
976 + header->version = 1;
977 + header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native;
978 + header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4;
979 + header->table_enc = DW_EH_PE_abs|DW_EH_PE_native;
980 + put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
981 + BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
982 + % __alignof(typeof(header->fde_count)));
983 + header->fde_count = n;
984 +
985 + BUILD_BUG_ON(offsetof(typeof(*header), table)
986 + % __alignof(typeof(*header->table)));
987 + for (fde = table->address, tableSize = table->size, n = 0;
988 + tableSize;
989 + tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
990 + const u32 *cie = fde + 1 - fde[1] / sizeof(*fde);
991 +
992 + if (!fde[1])
993 + continue; /* this is a CIE */
994 + ptr = (const u8 *)(fde + 2);
995 + header->table[n].start = read_pointer(&ptr,
996 + (const u8 *)(fde + 1) + *fde,
997 + fde_pointer_type(cie), 0, 0);
998 + header->table[n].fde = (unsigned long)fde;
999 + ++n;
1000 + }
1001 + WARN_ON(n != header->fde_count);
1002 +
1003 + sort(header->table,
1004 + n,
1005 + sizeof(*header->table),
1006 + cmp_eh_frame_hdr_table_entries,
1007 + swap_eh_frame_hdr_table_entries);
1008 +
1009 + table->hdrsz = hdrSize;
1010 + smp_wmb();
1011 + table->header = (const void *)header;
1012 +}
1013 +
1014 +static void *__init balloc(unsigned long sz)
1015 +{
1016 + return __alloc_bootmem_nopanic(sz,
1017 + sizeof(unsigned int),
1018 + __pa(MAX_DMA_ADDRESS));
1019 +}
1020 +
1021 +void __init unwind_setup(void)
1022 +{
1023 + setup_unwind_table(&root_table, balloc);
1024 +}
1025 +
1026 +#ifdef CONFIG_MODULES
1027 +
1028 +static struct unwind_table *last_table;
1029 +
1030 +/* Must be called with module_mutex held. */
1031 +void *unwind_add_table(struct module *module,
1032 + const void *table_start,
1033 + unsigned long table_size)
1034 +{
1035 + struct unwind_table *table;
1036 +
1037 + if (table_size <= 0)
1038 + return NULL;
1039 +
1040 + table = kmalloc(sizeof(*table), GFP_KERNEL);
1041 + if (!table)
1042 + return NULL;
1043 +
1044 + init_unwind_table(table, module->name,
1045 + module->module_core, module->core_size,
1046 + module->module_init, module->init_size,
1047 + table_start, table_size,
1048 + NULL, 0);
1049 +
1050 + if (last_table)
1051 + last_table->link = table;
1052 + else
1053 + root_table.link = table;
1054 + last_table = table;
1055 +
1056 + return table;
1057 +}
1058 +
1059 +struct unlink_table_info
1060 +{
1061 + struct unwind_table *table;
1062 + int init_only;
1063 +};
1064 +
1065 +static int unlink_table(void *arg)
1066 +{
1067 + struct unlink_table_info *info = arg;
1068 + struct unwind_table *table = info->table, *prev;
1069 +
1070 + for (prev = &root_table; prev->link && prev->link != table; prev = prev->link)
1071 + ;
1072 +
1073 + if (prev->link) {
1074 + if (info->init_only) {
1075 + table->init.pc = 0;
1076 + table->init.range = 0;
1077 + info->table = NULL;
1078 + } else {
1079 + prev->link = table->link;
1080 + if (!prev->link)
1081 + last_table = prev;
1082 + }
1083 + } else
1084 + info->table = NULL;
1085 +
1086 + return 0;
1087 +}
1088 +
1089 +/* Must be called with module_mutex held. */
1090 +void unwind_remove_table(void *handle, int init_only)
1091 +{
1092 + struct unwind_table *table = handle;
1093 + struct unlink_table_info info;
1094 +
1095 + if (!table || table == &root_table)
1096 + return;
1097 +
1098 + if (init_only && table == last_table) {
1099 + table->init.pc = 0;
1100 + table->init.range = 0;
1101 + return;
1102 + }
1103 +
1104 + info.table = table;
1105 + info.init_only = init_only;
1106 + stop_machine(unlink_table, &info, NULL);
1107 +
1108 + if (info.table)
1109 + kfree(table);
1110 +}
1111 +
1112 +#endif /* CONFIG_MODULES */
1113 +
1114 +static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
1115 +{
1116 + const u8 *cur = *pcur;
1117 + uleb128_t value;
1118 + unsigned shift;
1119 +
1120 + for (shift = 0, value = 0; cur < end; shift += 7) {
1121 + if (shift + 7 > 8 * sizeof(value)
1122 + && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
1123 + cur = end + 1;
1124 + break;
1125 + }
1126 + value |= (uleb128_t)(*cur & 0x7f) << shift;
1127 + if (!(*cur++ & 0x80))
1128 + break;
1129 + }
1130 + *pcur = cur;
1131 +
1132 + return value;
1133 +}
1134 +
1135 +static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
1136 +{
1137 + const u8 *cur = *pcur;
1138 + sleb128_t value;
1139 + unsigned shift;
1140 +
1141 + for (shift = 0, value = 0; cur < end; shift += 7) {
1142 + if (shift + 7 > 8 * sizeof(value)
1143 + && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
1144 + cur = end + 1;
1145 + break;
1146 + }
1147 + value |= (sleb128_t)(*cur & 0x7f) << shift;
1148 + if (!(*cur & 0x80)) {
1149 + value |= -(*cur++ & 0x40) << shift;
1150 + break;
1151 + }
1152 + }
1153 + *pcur = cur;
1154 +
1155 + return value;
1156 +}
1157 +
1158 +static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
1159 +{
1160 + const u32 *cie;
1161 +
1162 + if (!*fde || (*fde & (sizeof(*fde) - 1)))
1163 + return &bad_cie;
1164 + if (!fde[1])
1165 + return &not_fde; /* this is a CIE */
1166 + if ((fde[1] & (sizeof(*fde) - 1))
1167 + || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address)
1168 + return NULL; /* this is not a valid FDE */
1169 + cie = fde + 1 - fde[1] / sizeof(*fde);
1170 + if (*cie <= sizeof(*cie) + 4
1171 + || *cie >= fde[1] - sizeof(*fde)
1172 + || (*cie & (sizeof(*cie) - 1))
1173 + || cie[1])
1174 + return NULL; /* this is not a (valid) CIE */
1175 + return cie;
1176 +}
1177 +
1178 +static unsigned long read_pointer(const u8 **pLoc,
1179 + const void *end,
1180 + signed ptrType,
1181 + unsigned long text_base,
1182 + unsigned long data_base)
1183 +{
1184 + unsigned long value = 0;
1185 + union {
1186 + const u8 *p8;
1187 + const u16 *p16u;
1188 + const s16 *p16s;
1189 + const u32 *p32u;
1190 + const s32 *p32s;
1191 + const unsigned long *pul;
1192 + } ptr;
1193 +
1194 + if (ptrType < 0 || ptrType == DW_EH_PE_omit) {
1195 + dprintk(1, "Invalid pointer encoding %02X (%p,%p).", ptrType, *pLoc, end);
1196 + return 0;
1197 + }
1198 + ptr.p8 = *pLoc;
1199 + switch (ptrType & DW_EH_PE_FORM) {
1200 + case DW_EH_PE_data2:
1201 + if (end < (const void *)(ptr.p16u + 1)) {
1202 + dprintk(1, "Data16 overrun (%p,%p).", ptr.p8, end);
1203 + return 0;
1204 + }
1205 + if (ptrType & DW_EH_PE_signed)
1206 + value = get_unaligned(ptr.p16s++);
1207 + else
1208 + value = get_unaligned(ptr.p16u++);
1209 + break;
1210 + case DW_EH_PE_data4:
1211 +#ifdef CONFIG_64BIT
1212 + if (end < (const void *)(ptr.p32u + 1)) {
1213 + dprintk(1, "Data32 overrun (%p,%p).", ptr.p8, end);
1214 + return 0;
1215 + }
1216 + if (ptrType & DW_EH_PE_signed)
1217 + value = get_unaligned(ptr.p32s++);
1218 + else
1219 + value = get_unaligned(ptr.p32u++);
1220 + break;
1221 + case DW_EH_PE_data8:
1222 + BUILD_BUG_ON(sizeof(u64) != sizeof(value));
1223 +#else
1224 + BUILD_BUG_ON(sizeof(u32) != sizeof(value));
1225 +#endif
1226 + case DW_EH_PE_native:
1227 + if (end < (const void *)(ptr.pul + 1)) {
1228 + dprintk(1, "DataUL overrun (%p,%p).", ptr.p8, end);
1229 + return 0;
1230 + }
1231 + value = get_unaligned(ptr.pul++);
1232 + break;
1233 + case DW_EH_PE_leb128:
1234 + BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
1235 + value = ptrType & DW_EH_PE_signed
1236 + ? get_sleb128(&ptr.p8, end)
1237 + : get_uleb128(&ptr.p8, end);
1238 + if ((const void *)ptr.p8 > end) {
1239 + dprintk(1, "DataLEB overrun (%p,%p).", ptr.p8, end);
1240 + return 0;
1241 + }
1242 + break;
1243 + default:
1244 + dprintk(2, "Cannot decode pointer type %02X (%p,%p).",
1245 + ptrType, ptr.p8, end);
1246 + return 0;
1247 + }
1248 + switch (ptrType & DW_EH_PE_ADJUST) {
1249 + case DW_EH_PE_abs:
1250 + break;
1251 + case DW_EH_PE_pcrel:
1252 + value += (unsigned long)*pLoc;
1253 + break;
1254 + case DW_EH_PE_textrel:
1255 + if (likely(text_base)) {
1256 + value += text_base;
1257 + break;
1258 + }
1259 + dprintk(2, "Text-relative encoding %02X (%p,%p), but zero text base.",
1260 + ptrType, *pLoc, end);
1261 + return 0;
1262 + case DW_EH_PE_datarel:
1263 + if (likely(data_base)) {
1264 + value += data_base;
1265 + break;
1266 + }
1267 + dprintk(2, "Data-relative encoding %02X (%p,%p), but zero data base.",
1268 + ptrType, *pLoc, end);
1269 + return 0;
1270 + default:
1271 + dprintk(2, "Cannot adjust pointer type %02X (%p,%p).",
1272 + ptrType, *pLoc, end);
1273 + return 0;
1274 + }
1275 + if ((ptrType & DW_EH_PE_indirect)
1276 + && probe_kernel_address(value, value)) {
1277 + dprintk(1, "Cannot read indirect value %lx (%p,%p).",
1278 + value, *pLoc, end);
1279 + return 0;
1280 + }
1281 + *pLoc = ptr.p8;
1282 +
1283 + return value;
1284 +}
1285 +
1286 +static signed fde_pointer_type(const u32 *cie)
1287 +{
1288 + const u8 *ptr = (const u8 *)(cie + 2);
1289 + unsigned version = *ptr;
1290 +
1291 + if (version != 1)
1292 + return -1; /* unsupported */
1293 + if (*++ptr) {
1294 + const char *aug;
1295 + const u8 *end = (const u8 *)(cie + 1) + *cie;
1296 + uleb128_t len;
1297 +
1298 + /* check if augmentation size is first (and thus present) */
1299 + if (*ptr != 'z')
1300 + return -1;
1301 + /* check if augmentation string is nul-terminated */
1302 + if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL)
1303 + return -1;
1304 + ++ptr; /* skip terminator */
1305 + get_uleb128(&ptr, end); /* skip code alignment */
1306 + get_sleb128(&ptr, end); /* skip data alignment */
1307 + /* skip return address column */
1308 + version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end);
1309 + len = get_uleb128(&ptr, end); /* augmentation length */
1310 + if (ptr + len < ptr || ptr + len > end)
1311 + return -1;
1312 + end = ptr + len;
1313 + while (*++aug) {
1314 + if (ptr >= end)
1315 + return -1;
1316 + switch (*aug) {
1317 + case 'L':
1318 + ++ptr;
1319 + break;
1320 + case 'P': {
1321 + signed ptrType = *ptr++;
1322 +
1323 + if (!read_pointer(&ptr, end, ptrType, 0, 0)
1324 + || ptr > end)
1325 + return -1;
1326 + }
1327 + break;
1328 + case 'R':
1329 + return *ptr;
1330 + default:
1331 + return -1;
1332 + }
1333 + }
1334 + }
1335 + return DW_EH_PE_native|DW_EH_PE_abs;
1336 +}
1337 +
1338 +static int advance_loc(unsigned long delta, struct unwind_state *state)
1339 +{
1340 + state->loc += delta * state->codeAlign;
1341 +
1342 + return delta > 0;
1343 +}
1344 +
1345 +static void set_rule(uleb128_t reg,
1346 + enum item_location where,
1347 + uleb128_t value,
1348 + struct unwind_state *state)
1349 +{
1350 + if (reg < ARRAY_SIZE(state->regs)) {
1351 + state->regs[reg].where = where;
1352 + state->regs[reg].value = value;
1353 + }
1354 +}
1355 +
1356 +static int processCFI(const u8 *start,
1357 + const u8 *end,
1358 + unsigned long targetLoc,
1359 + signed ptrType,
1360 + struct unwind_state *state)
1361 +{
1362 + union {
1363 + const u8 *p8;
1364 + const u16 *p16;
1365 + const u32 *p32;
1366 + } ptr;
1367 + int result = 1;
1368 +
1369 + if (start != state->cieStart) {
1370 + state->loc = state->org;
1371 + result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state);
1372 + if (targetLoc == 0 && state->label == NULL)
1373 + return result;
1374 + }
1375 + for (ptr.p8 = start; result && ptr.p8 < end; ) {
1376 + switch (*ptr.p8 >> 6) {
1377 + uleb128_t value;
1378 +
1379 + case 0:
1380 + switch (*ptr.p8++) {
1381 + case DW_CFA_nop:
1382 + break;
1383 + case DW_CFA_set_loc:
1384 + state->loc = read_pointer(&ptr.p8, end, ptrType, 0, 0);
1385 + if (state->loc == 0)
1386 + result = 0;
1387 + break;
1388 + case DW_CFA_advance_loc1:
1389 + result = ptr.p8 < end && advance_loc(*ptr.p8++, state);
1390 + break;
1391 + case DW_CFA_advance_loc2:
1392 + result = ptr.p8 <= end + 2
1393 + && advance_loc(*ptr.p16++, state);
1394 + break;
1395 + case DW_CFA_advance_loc4:
1396 + result = ptr.p8 <= end + 4
1397 + && advance_loc(*ptr.p32++, state);
1398 + break;
1399 + case DW_CFA_offset_extended:
1400 + value = get_uleb128(&ptr.p8, end);
1401 + set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
1402 + break;
1403 + case DW_CFA_val_offset:
1404 + value = get_uleb128(&ptr.p8, end);
1405 + set_rule(value, Value, get_uleb128(&ptr.p8, end), state);
1406 + break;
1407 + case DW_CFA_offset_extended_sf:
1408 + value = get_uleb128(&ptr.p8, end);
1409 + set_rule(value, Memory, get_sleb128(&ptr.p8, end), state);
1410 + break;
1411 + case DW_CFA_val_offset_sf:
1412 + value = get_uleb128(&ptr.p8, end);
1413 + set_rule(value, Value, get_sleb128(&ptr.p8, end), state);
1414 + break;
1415 + case DW_CFA_restore_extended:
1416 + case DW_CFA_undefined:
1417 + case DW_CFA_same_value:
1418 + set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state);
1419 + break;
1420 + case DW_CFA_register:
1421 + value = get_uleb128(&ptr.p8, end);
1422 + set_rule(value,
1423 + Register,
1424 + get_uleb128(&ptr.p8, end), state);
1425 + break;
1426 + case DW_CFA_remember_state:
1427 + if (ptr.p8 == state->label) {
1428 + state->label = NULL;
1429 + return 1;
1430 + }
1431 + if (state->stackDepth >= MAX_STACK_DEPTH) {
1432 + dprintk(1, "State stack overflow (%p,%p).", ptr.p8, end);
1433 + return 0;
1434 + }
1435 + state->stack[state->stackDepth++] = ptr.p8;
1436 + break;
1437 + case DW_CFA_restore_state:
1438 + if (state->stackDepth) {
1439 + const uleb128_t loc = state->loc;
1440 + const u8 *label = state->label;
1441 +
1442 + state->label = state->stack[state->stackDepth - 1];
1443 + memcpy(&state->cfa, &badCFA, sizeof(state->cfa));
1444 + memset(state->regs, 0, sizeof(state->regs));
1445 + state->stackDepth = 0;
1446 + result = processCFI(start, end, 0, ptrType, state);
1447 + state->loc = loc;
1448 + state->label = label;
1449 + } else {
1450 + dprintk(1, "State stack underflow (%p,%p).", ptr.p8, end);
1451 + return 0;
1452 + }
1453 + break;
1454 + case DW_CFA_def_cfa:
1455 + state->cfa.reg = get_uleb128(&ptr.p8, end);
1456 + /*nobreak*/
1457 + case DW_CFA_def_cfa_offset:
1458 + state->cfa.offs = get_uleb128(&ptr.p8, end);
1459 + break;
1460 + case DW_CFA_def_cfa_sf:
1461 + state->cfa.reg = get_uleb128(&ptr.p8, end);
1462 + /*nobreak*/
1463 + case DW_CFA_def_cfa_offset_sf:
1464 + state->cfa.offs = get_sleb128(&ptr.p8, end)
1465 + * state->dataAlign;
1466 + break;
1467 + case DW_CFA_def_cfa_register:
1468 + state->cfa.reg = get_uleb128(&ptr.p8, end);
1469 + break;
1470 + /*todo case DW_CFA_def_cfa_expression: */
1471 + /*todo case DW_CFA_expression: */
1472 + /*todo case DW_CFA_val_expression: */
1473 + case DW_CFA_GNU_args_size:
1474 + get_uleb128(&ptr.p8, end);
1475 + break;
1476 + case DW_CFA_GNU_negative_offset_extended:
1477 + value = get_uleb128(&ptr.p8, end);
1478 + set_rule(value,
1479 + Memory,
1480 + (uleb128_t)0 - get_uleb128(&ptr.p8, end), state);
1481 + break;
1482 + case DW_CFA_GNU_window_save:
1483 + default:
1484 + dprintk(1, "Unrecognized CFI op %02X (%p,%p).", ptr.p8[-1], ptr.p8 - 1, end);
1485 + result = 0;
1486 + break;
1487 + }
1488 + break;
1489 + case 1:
1490 + result = advance_loc(*ptr.p8++ & 0x3f, state);
1491 + break;
1492 + case 2:
1493 + value = *ptr.p8++ & 0x3f;
1494 + set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
1495 + break;
1496 + case 3:
1497 + set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
1498 + break;
1499 + }
1500 + if (ptr.p8 > end) {
1501 + dprintk(1, "Data overrun (%p,%p).", ptr.p8, end);
1502 + result = 0;
1503 + }
1504 + if (result && targetLoc != 0 && targetLoc < state->loc)
1505 + return 1;
1506 + }
1507 +
1508 + if (result && ptr.p8 < end)
1509 + dprintk(1, "Data underrun (%p,%p).", ptr.p8, end);
1510 +
1511 + return result
1512 + && ptr.p8 == end
1513 + && (targetLoc == 0
1514 + || (/*todo While in theory this should apply, gcc in practice omits
1515 + everything past the function prolog, and hence the location
1516 + never reaches the end of the function.
1517 + targetLoc < state->loc &&*/ state->label == NULL));
1518 +}
1519 +
1520 +/* Unwind to the previous frame. Returns 0 if successful, negative
1521 + * number in case of an error. */
1522 +int unwind(struct unwind_frame_info *frame)
1523 +{
1524 +#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
1525 + const u32 *fde = NULL, *cie = NULL;
1526 + const u8 *ptr = NULL, *end = NULL;
1527 + unsigned long pc = UNW_PC(frame) - frame->call_frame, sp;
1528 + unsigned long startLoc = 0, endLoc = 0, cfa;
1529 + unsigned i;
1530 + signed ptrType = -1;
1531 + uleb128_t retAddrReg = 0;
1532 + const struct unwind_table *table;
1533 + struct unwind_state state;
1534 +
1535 + if (UNW_PC(frame) == 0)
1536 + return -EINVAL;
1537 + if ((table = find_table(pc)) != NULL
1538 + && !(table->size & (sizeof(*fde) - 1))) {
1539 + const u8 *hdr = table->header;
1540 + unsigned long tableSize;
1541 +
1542 + smp_rmb();
1543 + if (hdr && hdr[0] == 1) {
1544 + switch (hdr[3] & DW_EH_PE_FORM) {
1545 + case DW_EH_PE_native: tableSize = sizeof(unsigned long); break;
1546 + case DW_EH_PE_data2: tableSize = 2; break;
1547 + case DW_EH_PE_data4: tableSize = 4; break;
1548 + case DW_EH_PE_data8: tableSize = 8; break;
1549 + default: tableSize = 0; break;
1550 + }
1551 + ptr = hdr + 4;
1552 + end = hdr + table->hdrsz;
1553 + if (tableSize
1554 + && read_pointer(&ptr, end, hdr[1], 0, 0)
1555 + == (unsigned long)table->address
1556 + && (i = read_pointer(&ptr, end, hdr[2], 0, 0)) > 0
1557 + && i == (end - ptr) / (2 * tableSize)
1558 + && !((end - ptr) % (2 * tableSize))) {
1559 + do {
1560 + const u8 *cur = ptr + (i / 2) * (2 * tableSize);
1561 +
1562 + startLoc = read_pointer(&cur,
1563 + cur + tableSize,
1564 + hdr[3], 0,
1565 + (unsigned long)hdr);
1566 + if (pc < startLoc)
1567 + i /= 2;
1568 + else {
1569 + ptr = cur - tableSize;
1570 + i = (i + 1) / 2;
1571 + }
1572 + } while (startLoc && i > 1);
1573 + if (i == 1
1574 + && (startLoc = read_pointer(&ptr,
1575 + ptr + tableSize,
1576 + hdr[3], 0,
1577 + (unsigned long)hdr)) != 0
1578 + && pc >= startLoc)
1579 + fde = (void *)read_pointer(&ptr,
1580 + ptr + tableSize,
1581 + hdr[3], 0,
1582 + (unsigned long)hdr);
1583 + }
1584 + }
1585 + if (hdr && !fde)
1586 + dprintk(3, "Binary lookup for %lx failed.", pc);
1587 +
1588 + if (fde != NULL) {
1589 + cie = cie_for_fde(fde, table);
1590 + ptr = (const u8 *)(fde + 2);
1591 + if (cie != NULL
1592 + && cie != &bad_cie
1593 + && cie != &not_fde
1594 + && (ptrType = fde_pointer_type(cie)) >= 0
1595 + && read_pointer(&ptr,
1596 + (const u8 *)(fde + 1) + *fde,
1597 + ptrType, 0, 0) == startLoc) {
1598 + if (!(ptrType & DW_EH_PE_indirect))
1599 + ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
1600 + endLoc = startLoc
1601 + + read_pointer(&ptr,
1602 + (const u8 *)(fde + 1) + *fde,
1603 + ptrType, 0, 0);
1604 + if (pc >= endLoc)
1605 + fde = NULL;
1606 + } else
1607 + fde = NULL;
1608 + if (!fde)
1609 + dprintk(1, "Binary lookup result for %lx discarded.", pc);
1610 + }
1611 + if (fde == NULL) {
1612 + for (fde = table->address, tableSize = table->size;
1613 + cie = NULL, tableSize > sizeof(*fde)
1614 + && tableSize - sizeof(*fde) >= *fde;
1615 + tableSize -= sizeof(*fde) + *fde,
1616 + fde += 1 + *fde / sizeof(*fde)) {
1617 + cie = cie_for_fde(fde, table);
1618 + if (cie == &bad_cie) {
1619 + cie = NULL;
1620 + break;
1621 + }
1622 + if (cie == NULL
1623 + || cie == &not_fde
1624 + || (ptrType = fde_pointer_type(cie)) < 0)
1625 + continue;
1626 + ptr = (const u8 *)(fde + 2);
1627 + startLoc = read_pointer(&ptr,
1628 + (const u8 *)(fde + 1) + *fde,
1629 + ptrType, 0, 0);
1630 + if (!startLoc)
1631 + continue;
1632 + if (!(ptrType & DW_EH_PE_indirect))
1633 + ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
1634 + endLoc = startLoc
1635 + + read_pointer(&ptr,
1636 + (const u8 *)(fde + 1) + *fde,
1637 + ptrType, 0, 0);
1638 + if (pc >= startLoc && pc < endLoc)
1639 + break;
1640 + }
1641 + if (!fde)
1642 + dprintk(3, "Linear lookup for %lx failed.", pc);
1643 + }
1644 + }
1645 + if (cie != NULL) {
1646 + memset(&state, 0, sizeof(state));
1647 + state.cieEnd = ptr; /* keep here temporarily */
1648 + ptr = (const u8 *)(cie + 2);
1649 + end = (const u8 *)(cie + 1) + *cie;
1650 + frame->call_frame = 1;
1651 + if ((state.version = *ptr) != 1)
1652 + cie = NULL; /* unsupported version */
1653 + else if (*++ptr) {
1654 + /* check if augmentation size is first (and thus present) */
1655 + if (*ptr == 'z') {
1656 + while (++ptr < end && *ptr) {
1657 + switch (*ptr) {
1658 + /* check for ignorable (or already handled)
1659 + * nul-terminated augmentation string */
1660 + case 'L':
1661 + case 'P':
1662 + case 'R':
1663 + continue;
1664 + case 'S':
1665 + frame->call_frame = 0;
1666 + continue;
1667 + default:
1668 + break;
1669 + }
1670 + break;
1671 + }
1672 + }
1673 + if (ptr >= end || *ptr)
1674 + cie = NULL;
1675 + }
1676 + if (!cie)
1677 + dprintk(1, "CIE unusable (%p,%p).", ptr, end);
1678 + ++ptr;
1679 + }
1680 + if (cie != NULL) {
1681 + /* get code alignment factor */
1682 + state.codeAlign = get_uleb128(&ptr, end);
1683 + /* get data alignment factor */
1684 + state.dataAlign = get_sleb128(&ptr, end);
1685 + if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
1686 + cie = NULL;
1687 + else if (UNW_PC(frame) % state.codeAlign
1688 + || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
1689 + dprintk(1, "Input pointer(s) misaligned (%lx,%lx).",
1690 + UNW_PC(frame), UNW_SP(frame));
1691 + return -EPERM;
1692 + } else {
1693 + retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
1694 + /* skip augmentation */
1695 + if (((const char *)(cie + 2))[1] == 'z') {
1696 + uleb128_t augSize = get_uleb128(&ptr, end);
1697 +
1698 + ptr += augSize;
1699 + }
1700 + if (ptr > end
1701 + || retAddrReg >= ARRAY_SIZE(reg_info)
1702 + || REG_INVALID(retAddrReg)
1703 + || reg_info[retAddrReg].width != sizeof(unsigned long))
1704 + cie = NULL;
1705 + }
1706 + if (!cie)
1707 + dprintk(1, "CIE validation failed (%p,%p).", ptr, end);
1708 + }
1709 + if (cie != NULL) {
1710 + state.cieStart = ptr;
1711 + ptr = state.cieEnd;
1712 + state.cieEnd = end;
1713 + end = (const u8 *)(fde + 1) + *fde;
1714 + /* skip augmentation */
1715 + if (((const char *)(cie + 2))[1] == 'z') {
1716 + uleb128_t augSize = get_uleb128(&ptr, end);
1717 +
1718 + if ((ptr += augSize) > end)
1719 + fde = NULL;
1720 + }
1721 + if (!fde)
1722 + dprintk(1, "FDE validation failed (%p,%p).", ptr, end);
1723 + }
1724 + if (cie == NULL || fde == NULL) {
1725 +#ifdef CONFIG_FRAME_POINTER
1726 + unsigned long top, bottom;
1727 +
1728 + if ((UNW_SP(frame) | UNW_FP(frame)) % sizeof(unsigned long))
1729 + return -EPERM;
1730 + top = TSK_STACK_TOP(frame->task);
1731 + bottom = STACK_BOTTOM(frame->task);
1732 +# if FRAME_RETADDR_OFFSET < 0
1733 + if (UNW_SP(frame) < top
1734 + && UNW_FP(frame) <= UNW_SP(frame)
1735 + && bottom < UNW_FP(frame)
1736 +# else
1737 + if (UNW_SP(frame) > top
1738 + && UNW_FP(frame) >= UNW_SP(frame)
1739 + && bottom > UNW_FP(frame)
1740 +# endif
1741 + && !((UNW_SP(frame) | UNW_FP(frame))
1742 + & (sizeof(unsigned long) - 1))) {
1743 + unsigned long link;
1744 +
1745 + if (!probe_kernel_address(UNW_FP(frame) + FRAME_LINK_OFFSET,
1746 + link)
1747 +# if FRAME_RETADDR_OFFSET < 0
1748 + && link > bottom && link < UNW_FP(frame)
1749 +# else
1750 + && link > UNW_FP(frame) && link < bottom
1751 +# endif
1752 + && !(link & (sizeof(link) - 1))
1753 + && !probe_kernel_address(UNW_FP(frame) + FRAME_RETADDR_OFFSET,
1754 + UNW_PC(frame))) {
1755 + UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET
1756 +# if FRAME_RETADDR_OFFSET < 0
1757 + -
1758 +# else
1759 + +
1760 +# endif
1761 + sizeof(UNW_PC(frame));
1762 + UNW_FP(frame) = link;
1763 + return 0;
1764 + }
1765 + }
1766 +#endif
1767 + return -ENXIO;
1768 + }
1769 + state.org = startLoc;
1770 + memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
1771 + /* process instructions */
1772 + if (!processCFI(ptr, end, pc, ptrType, &state)
1773 + || state.loc > endLoc
1774 + || state.regs[retAddrReg].where == Nowhere
1775 + || state.cfa.reg >= ARRAY_SIZE(reg_info)
1776 + || reg_info[state.cfa.reg].width != sizeof(unsigned long)
1777 + || FRAME_REG(state.cfa.reg, unsigned long) % sizeof(unsigned long)
1778 + || state.cfa.offs % sizeof(unsigned long)) {
1779 + dprintk(1, "Unusable unwind info (%p,%p).", ptr, end);
1780 + return -EIO;
1781 + }
1782 + /* update frame */
1783 +#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
1784 + if (frame->call_frame
1785 + && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
1786 + frame->call_frame = 0;
1787 +#endif
1788 + cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
1789 + startLoc = min((unsigned long)UNW_SP(frame), cfa);
1790 + endLoc = max((unsigned long)UNW_SP(frame), cfa);
1791 + if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
1792 + startLoc = min(STACK_LIMIT(cfa), cfa);
1793 + endLoc = max(STACK_LIMIT(cfa), cfa);
1794 + }
1795 +#ifndef CONFIG_64BIT
1796 +# define CASES CASE(8); CASE(16); CASE(32)
1797 +#else
1798 +# define CASES CASE(8); CASE(16); CASE(32); CASE(64)
1799 +#endif
1800 + pc = UNW_PC(frame);
1801 + sp = UNW_SP(frame);
1802 + for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
1803 + if (REG_INVALID(i)) {
1804 + if (state.regs[i].where == Nowhere)
1805 + continue;
1806 + dprintk(1, "Cannot restore register %u (%d).",
1807 + i, state.regs[i].where);
1808 + return -EIO;
1809 + }
1810 + switch (state.regs[i].where) {
1811 + default:
1812 + break;
1813 + case Register:
1814 + if (state.regs[i].value >= ARRAY_SIZE(reg_info)
1815 + || REG_INVALID(state.regs[i].value)
1816 + || reg_info[i].width > reg_info[state.regs[i].value].width) {
1817 + dprintk(1, "Cannot restore register %u from register %lu.",
1818 + i, state.regs[i].value);
1819 + return -EIO;
1820 + }
1821 + switch (reg_info[state.regs[i].value].width) {
1822 +#define CASE(n) \
1823 + case sizeof(u##n): \
1824 + state.regs[i].value = FRAME_REG(state.regs[i].value, \
1825 + const u##n); \
1826 + break
1827 + CASES;
1828 +#undef CASE
1829 + default:
1830 + dprintk(1, "Unsupported register size %u (%lu).",
1831 + reg_info[state.regs[i].value].width,
1832 + state.regs[i].value);
1833 + return -EIO;
1834 + }
1835 + break;
1836 + }
1837 + }
1838 + for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
1839 + if (REG_INVALID(i))
1840 + continue;
1841 + switch (state.regs[i].where) {
1842 + case Nowhere:
1843 + if (reg_info[i].width != sizeof(UNW_SP(frame))
1844 + || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
1845 + != &UNW_SP(frame))
1846 + continue;
1847 + UNW_SP(frame) = cfa;
1848 + break;
1849 + case Register:
1850 + switch (reg_info[i].width) {
1851 +#define CASE(n) case sizeof(u##n): \
1852 + FRAME_REG(i, u##n) = state.regs[i].value; \
1853 + break
1854 + CASES;
1855 +#undef CASE
1856 + default:
1857 + dprintk(1, "Unsupported register size %u (%u).",
1858 + reg_info[i].width, i);
1859 + return -EIO;
1860 + }
1861 + break;
1862 + case Value:
1863 + if (reg_info[i].width != sizeof(unsigned long)) {
1864 + dprintk(1, "Unsupported value size %u (%u).",
1865 + reg_info[i].width, i);
1866 + return -EIO;
1867 + }
1868 + FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
1869 + * state.dataAlign;
1870 + break;
1871 + case Memory: {
1872 + unsigned long addr = cfa + state.regs[i].value
1873 + * state.dataAlign;
1874 +
1875 + if ((state.regs[i].value * state.dataAlign)
1876 + % sizeof(unsigned long)
1877 + || addr < startLoc
1878 + || addr + sizeof(unsigned long) < addr
1879 + || addr + sizeof(unsigned long) > endLoc) {
1880 + dprintk(1, "Bad memory location %lx (%lx).",
1881 + addr, state.regs[i].value);
1882 + return -EIO;
1883 + }
1884 + switch (reg_info[i].width) {
1885 +#define CASE(n) case sizeof(u##n): \
1886 + if (probe_kernel_address(addr, \
1887 + FRAME_REG(i, u##n))) \
1888 + return -EFAULT; \
1889 + break
1890 + CASES;
1891 +#undef CASE
1892 + default:
1893 + dprintk(1, "Unsupported memory size %u (%u).",
1894 + reg_info[i].width, i);
1895 + return -EIO;
1896 + }
1897 + }
1898 + break;
1899 + }
1900 + }
1901 +
1902 + if (UNW_PC(frame) % state.codeAlign
1903 + || UNW_SP(frame) % sleb128abs(state.dataAlign)) {
1904 + dprintk(1, "Output pointer(s) misaligned (%lx,%lx).",
1905 + UNW_PC(frame), UNW_SP(frame));
1906 + return -EIO;
1907 + }
1908 + if (pc == UNW_PC(frame) && sp == UNW_SP(frame)) {
1909 + dprintk(1, "No progress (%lx,%lx).", pc, sp);
1910 + return -EIO;
1911 + }
1912 +
1913 + return 0;
1914 +#undef CASES
1915 +#undef FRAME_REG
1916 +}
1917 +EXPORT_SYMBOL_GPL(unwind);
1918 +
1919 +int unwind_init_frame_info(struct unwind_frame_info *info,
1920 + struct task_struct *tsk,
1921 + /*const*/ struct pt_regs *regs)
1922 +{
1923 + info->task = tsk;
1924 + info->call_frame = 0;
1925 + arch_unw_init_frame_info(info, regs);
1926 +
1927 + return 0;
1928 +}
1929 +EXPORT_SYMBOL_GPL(unwind_init_frame_info);
1930 +
1931 +/*
1932 + * Prepare to unwind a blocked task.
1933 + */
1934 +int unwind_init_blocked(struct unwind_frame_info *info,
1935 + struct task_struct *tsk)
1936 +{
1937 + info->task = tsk;
1938 + info->call_frame = 0;
1939 + arch_unw_init_blocked(info);
1940 +
1941 + return 0;
1942 +}
1943 +EXPORT_SYMBOL_GPL(unwind_init_blocked);
1944 +
1945 +/*
1946 + * Prepare to unwind the currently running thread.
1947 + */
1948 +int unwind_init_running(struct unwind_frame_info *info,
1949 + asmlinkage int (*callback)(struct unwind_frame_info *,
1950 + void *arg),
1951 + void *arg)
1952 +{
1953 + info->task = current;
1954 + info->call_frame = 0;
1955 +
1956 + return arch_unwind_init_running(info, callback, arg);
1957 +}
1958 +EXPORT_SYMBOL_GPL(unwind_init_running);
1959 +
1960 +/*
1961 + * Unwind until the return pointer is in user-land (or until an error
1962 + * occurs). Returns 0 if successful, negative number in case of
1963 + * error.
1964 + */
1965 +int unwind_to_user(struct unwind_frame_info *info)
1966 +{
1967 + while (!arch_unw_user_mode(info)) {
1968 + int err = unwind(info);
1969 +
1970 + if (err < 0)
1971 + return err;
1972 + }
1973 +
1974 + return 0;
1975 +}
1976 +EXPORT_SYMBOL_GPL(unwind_to_user);
1977 --- a/lib/Kconfig.debug
1978 +++ b/lib/Kconfig.debug
1979 @@ -548,6 +548,24 @@ config FRAME_POINTER
1980 some architectures or if you use external debuggers.
1981 If you don't debug the kernel, you can say N.
1982
1983 +config UNWIND_INFO
1984 + bool "Compile the kernel with frame unwind information"
1985 + depends on !IA64 && !PARISC && !ARM
1986 + depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
1987 + help
1988 + If you say Y here the resulting kernel image will be slightly larger
1989 + but not slower, and it will give very useful debugging information.
1990 + If you don't debug the kernel, you can say N, but we may not be able
1991 + to solve problems without frame unwind information or frame pointers.
1992 +
1993 +config STACK_UNWIND
1994 + bool "Stack unwind support"
1995 + depends on UNWIND_INFO
1996 + depends on X86
1997 + help
1998 + This enables more precise stack traces, omitting all unrelated
1999 + occurrences of pointers into kernel code from the dump.
2000 +
2001 config BOOT_PRINTK_DELAY
2002 bool "Delay each boot printk message by N milliseconds"
2003 depends on DEBUG_KERNEL && PRINTK && GENERIC_CALIBRATE_DELAY