/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

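/*
 * One unwound frame: the frame pointer (x29) and the return address of the
 * corresponding call. When the function graph tracer is enabled, @graph
 * indexes the task's return stack so that return addresses the tracer has
 * rewritten can be restored during the walk.
 */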
struct stackframe {
	unsigned long fp;
	unsigned long pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
};

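/*
 * The classes of stack an address can fall on. The SDEI types cover the
 * per-CPU stacks used while handling Software Delegated Exception Interface
 * events at normal and critical priority.
 */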
enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
};

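/*
 * Bounds of the stack containing a given address, as the half-open range
 * [low, high), together with its classification.
 */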
struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

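/*
 * unwind_frame() steps @frame one frame towards the caller, returning a
 * negative error once the walk cannot continue; walk_stackframe() invokes
 * @fn on successive frames until @fn returns non-zero or unwinding fails;
 * dump_backtrace() prints a backtrace to the kernel log.
 */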
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

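/*
 * A minimal usage sketch (illustrative only: consume_entry() is a
 * hypothetical caller-supplied callback, not part of this API). Walking
 * the stack of a blocked task might look like:
 *
 *	static int consume_entry(struct stackframe *frame, void *cookie)
 *	{
 *		pr_info("%pS\n", (void *)frame->pc);
 *		return 0;	// non-zero stops the walk
 *	}
 *
 *	struct stackframe frame;
 *
 *	frame.fp = thread_saved_fp(tsk);
 *	frame.pc = thread_saved_pc(tsk);
 *	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 *	frame.graph = 0;
 *	#endif
 *	walk_stackframe(tsk, &frame, consume_entry, NULL);
 */
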
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

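/*
 * Check whether @sp is on this CPU's IRQ stack, filling in @info (when
 * non-NULL) with the stack's bounds and type. irq_stack_ptr is NULL until
 * the IRQ stack has been set up, hence the !low check.
 */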
static inline bool on_irq_stack(unsigned long sp,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	if (!low)
		return false;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_IRQ;
	}

	return true;
}

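/*
 * Check whether @sp is on @tsk's task stack, filling in @info (when
 * non-NULL) with the stack's bounds and type.
 */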
static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_TASK;
	}

	return true;
}

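/*
 * With CONFIG_VMAP_STACK, each CPU has a dedicated overflow stack that is
 * switched to when a kernel stack overflow is detected; without it there
 * is no such stack and the check trivially fails.
 */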
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	if (sp < low || sp >= high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = STACK_TYPE_OVERFLOW;
	}

	return true;
}
#else
static inline bool on_overflow_stack(unsigned long sp,
				     struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(struct task_struct *tsk,
				       unsigned long sp,
				       struct stack_info *info)
{
	if (on_task_stack(tsk, sp, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, info))
		return true;
	if (on_overflow_stack(sp, info))
		return true;
	if (on_sdei_stack(sp, info))
		return true;

	return false;
}
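
/*
 * A minimal sketch (illustrative only, not the in-tree unwinder) of how a
 * frame-pointer step can use on_accessible_stack() to validate a frame
 * record before dereferencing it. An AAPCS64 frame record holds the
 * caller's frame pointer at [fp] and the return address at [fp + 8], and
 * fp must be 16-byte aligned:
 *
 *	static int step_frame(struct task_struct *tsk, struct stackframe *frame)
 *	{
 *		unsigned long fp = frame->fp;
 *
 *		if (fp & 0xf)
 *			return -EINVAL;
 *		if (!on_accessible_stack(tsk, fp, NULL))
 *			return -EINVAL;
 *
 *		frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 *		frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
 *		return 0;
 *	}
 */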

#endif	/* __ASM_STACKTRACE_H */