/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/types.h>

/*
 * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
 * reserve at the top of the kernel stack.  We do it because of a nasty
 * 32-bit corner case.  On x86_32, the hardware stack frame is
 * variable-length.  Except for vm86 mode, struct pt_regs assumes a
 * maximum-length frame.  If we enter from CPL 0, the top 8 bytes of
 * pt_regs don't actually exist.  Ordinarily this doesn't matter, but it
 * does in at least one case:
 *
 * If we take an NMI early enough in SYSENTER, then we can end up with
 * pt_regs that extends above sp0.  On the way out, in the espfix code,
 * we can read the saved SS value, but that value will be above sp0.
 * Without this offset, that can result in a page fault.  (We are
 * careful that, in this case, the value we read doesn't matter.)
 *
 * In vm86 mode, the hardware frame is much longer still, so add 16
 * bytes to make room for the real-mode segments.
 *
 * x86_64 has a fixed-length stack frame.
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_VM86
#  define TOP_OF_KERNEL_STACK_PADDING 16
# else
#  define TOP_OF_KERNEL_STACK_PADDING 8
# endif
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif
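
/*
 * Example (illustrative sketch, not part of this header): the padding is
 * consumed when locating the hardware frame at the top of a task's stack.
 * task_pt_regs() in <asm/processor.h> does roughly the following:
 *
 *	unsigned long ptr = (unsigned long)task_stack_page(task);
 *	ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
 *	struct pt_regs *regs = ((struct pt_regs *)ptr) - 1;
 *
 * so the padding bytes sit between the end of struct pt_regs and the very
 * top of the stack allocation.
 */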

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>

struct thread_info {
	unsigned long		flags;		/* low level flags */
};

#define INIT_THREAD_INFO(tsk)			\
{						\
	.flags		= 0,			\
}
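
/*
 * Example (sketch): on x86 this struct is embedded at the start of
 * task_struct (CONFIG_THREAD_INFO_IN_TASK), and C code normally reaches
 * ->flags through the generic helpers from <linux/thread_info.h>, e.g.:
 *
 *	if (test_thread_flag(TIF_SIGPENDING))
 *		do_signal_work();		(hypothetical callee)
 *	set_tsk_thread_flag(task, TIF_NEED_RESCHED);
 *
 * Assembly code reaches the same word through the offsets generated into
 * <asm/asm-offsets.h>.
 */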

#define init_stack		(init_thread_union.stack)

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return */
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
#define TIF_UPROBE		12	/* breakpointed or singlestepping */
#define TIF_PATCH_PENDING	13	/* pending live patching update */
#define TIF_NOCPUID		15	/* CPUID is not accessible in userland */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* IA32 compatibility process */
#define TIF_NOHZ		19	/* in adaptive nohz mode */
#define TIF_MEMDIE		20	/* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
#define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
#define TIF_X32			30	/* 32-bit native x86-64 binary */
#define TIF_FSCHECK		31	/* Check FS is USER_DS on return */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
#define _TIF_NOCPUID		(1 << TIF_NOCPUID)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_NOHZ		(1 << TIF_NOHZ)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32		(1 << TIF_ADDR32)
#define _TIF_X32		(1 << TIF_X32)
#define _TIF_FSCHECK		(1 << TIF_FSCHECK)
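
/*
 * Example (sketch): each TIF_* value is a bit number and the matching
 * _TIF_* value is the corresponding mask, e.g. _TIF_SIGPENDING is
 * (1 << TIF_SIGPENDING) == 0x4.  C code usually tests the bit number:
 *
 *	if (test_thread_flag(TIF_SYSCALL_TRACE))
 *		...
 *
 * while assembly and combined-flag checks work on the _TIF_* masks, along
 * the lines of:
 *
 *	testl	$_TIF_NEED_RESCHED, TASK_TI_flags(%rbx)
 *
 * (register choice above is purely illustrative).
 */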

/*
 * work to do in syscall_trace_enter().  Also includes TIF_NOHZ for
 * enter_from_user_mode()
 */
#define _TIF_WORK_SYSCALL_ENTRY	\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT |			\
	 _TIF_NOHZ)
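
/*
 * Example (sketch): the C syscall entry path tests the whole composite
 * mask in one go before taking the slow path, roughly:
 *
 *	if (READ_ONCE(current_thread_info()->flags) & _TIF_WORK_SYSCALL_ENTRY)
 *		nr = syscall_trace_enter(regs);
 *
 * (based on arch/x86/entry/common.c; details elided.)
 */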

/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK						\
	(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING |	\
	 _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU |	\
	 _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE |	\
	 _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT |	\
	 _TIF_FSCHECK)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)

#define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW)

#define STACK_WARN		(THREAD_SIZE/8)

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

/*
 * Walks up the stack frames to make sure that the specified object is
 * entirely contained by a single stack frame.
 *
 * Returns:
 *	GOOD_FRAME	if within a frame
 *	BAD_STACK	if placed across a frame boundary (or outside stack)
 *	NOT_STACK	unable to determine (no frame pointers, etc)
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
#if defined(CONFIG_FRAME_POINTER)
	const void *frame = NULL;
	const void *oldframe;

	oldframe = __builtin_frame_address(1);
	if (oldframe)
		frame = __builtin_frame_address(2);
	/*
	 * low ----------------------------------------------> high
	 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
	 *                     ^----------------^
	 *               allow copies only within here
	 */
	while (stack <= frame && frame < stackend) {
		/*
		 * If obj + len extends past the last frame, this
		 * check won't pass and the next frame will be 0,
		 * causing us to bail out and correctly report
		 * the copy as invalid.
		 */
		if (obj + len <= frame)
			return obj >= oldframe + 2 * sizeof(void *) ?
				GOOD_FRAME : BAD_STACK;
		oldframe = frame;
		frame = *(const void * const *)frame;
	}
	return BAD_STACK;
#else
	return NOT_STACK;
#endif
}
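
/*
 * Example (sketch): the hardened usercopy code in mm/usercopy.c is the
 * caller; when an object being copied to or from userspace lies on the
 * current task's stack, it asks this helper whether the object stays
 * within a single frame, roughly:
 *
 *	int ret = arch_within_stack_frames(stack, stackend, ptr, n);
 *
 *	if (ret == BAD_STACK)
 *		... report and reject the copy ...
 *	else if (ret == GOOD_FRAME)
 *		... copy is known to be contained in one frame ...
 *	else
 *		... NOT_STACK: fall back to coarser checks ...
 */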

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_64
# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
#endif

#endif

#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED	0x0004	/* regs poked by 32-bit ptracer */
#endif
#ifndef __ASSEMBLY__

#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
#else
#define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
			   current->thread.status & TS_COMPAT)
#endif
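
/*
 * Example (sketch, with hypothetical helpers): code that must treat a
 * 32-bit (compat) system call differently can branch on this predicate:
 *
 *	if (in_ia32_syscall())
 *		return put_compat_record(buf);		(hypothetical)
 *	return put_native_record(buf);			(hypothetical)
 *
 * On 64-bit kernels this is true only while the task is executing an
 * IA-32 emulation syscall (TS_COMPAT set); on 32-bit kernels every
 * syscall is a 32-bit syscall, hence the constant "true".
 */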

/*
 * Force syscall return via IRET by making it look as if there was
 * some work pending. IRET is our most capable (but slowest) syscall
 * return path, which is able to restore modified SS, CS and certain
 * EFLAGS values that other (fast) syscall return instructions
 * are not able to restore properly.
 */
#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
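
/*
 * Example (sketch): callers that rewrite the saved user CS/SS/EFLAGS use
 * this so the eventual return to userspace goes through IRET; exec is the
 * classic case, roughly (based on start_thread() in
 * arch/x86/kernel/process_64.c, details elided):
 *
 *	regs->cs = _cs;
 *	regs->ss = _ss;
 *	...
 *	force_iret();
 */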

extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_THREAD_INFO_H */