Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | /* |
3 | * linux/arch/x86_64/entry.S | |
4 | * | |
5 | * Copyright (C) 1991, 1992 Linus Torvalds | |
6 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs | |
7 | * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> | |
4d732138 | 8 | * |
1da177e4 LT |
9 | * entry.S contains the system-call and fault low-level handling routines. |
10 | * | |
8b4777a4 AL |
11 | * Some of this is documented in Documentation/x86/entry_64.txt |
12 | * | |
0bd7b798 | 13 | * A note on terminology: |
4d732138 IM |
14 | * - iret frame: Architecture defined interrupt frame from SS to RIP |
15 | * at the top of the kernel process stack. | |
2e91a17b AK |
16 | * |
17 | * Some macro usage: | |
4d732138 IM |
18 | * - ENTRY/END: Define functions in the symbol table. |
19 | * - TRACE_IRQS_*: Trace hardirq state for lock debugging. | |
20 | * - idtentry: Define exception entry points. | |
1da177e4 | 21 | */ |
1da177e4 LT |
22 | #include <linux/linkage.h> |
23 | #include <asm/segment.h> | |
1da177e4 LT |
24 | #include <asm/cache.h> |
25 | #include <asm/errno.h> | |
e2d5df93 | 26 | #include <asm/asm-offsets.h> |
1da177e4 LT |
27 | #include <asm/msr.h> |
28 | #include <asm/unistd.h> | |
29 | #include <asm/thread_info.h> | |
30 | #include <asm/hw_irq.h> | |
0341c14d | 31 | #include <asm/page_types.h> |
2601e64d | 32 | #include <asm/irqflags.h> |
72fe4858 | 33 | #include <asm/paravirt.h> |
9939ddaf | 34 | #include <asm/percpu.h> |
d7abc0fa | 35 | #include <asm/asm.h> |
63bcff2a | 36 | #include <asm/smap.h> |
3891a04a | 37 | #include <asm/pgtable_types.h> |
784d5699 | 38 | #include <asm/export.h> |
8c1f7558 | 39 | #include <asm/frame.h> |
2641f08b | 40 | #include <asm/nospec-branch.h> |
d7e7528b | 41 | #include <linux/err.h> |
1da177e4 | 42 | |
6fd166aa PZ |
43 | #include "calling.h" |
44 | ||
4d732138 IM |
45 | .code64 |
46 | .section .entry.text, "ax" | |
16444a8a | 47 | |
72fe4858 | 48 | #ifdef CONFIG_PARAVIRT |
2be29982 | 49 | ENTRY(native_usergs_sysret64) |
8c1f7558 | 50 | UNWIND_HINT_EMPTY |
72fe4858 GOC |
51 | swapgs |
52 | sysretq | |
8c1f7558 | 53 | END(native_usergs_sysret64) |
72fe4858 GOC |
54 | #endif /* CONFIG_PARAVIRT */ |
55 | ||
f2db9382 | 56 | .macro TRACE_IRQS_IRETQ |
2601e64d | 57 | #ifdef CONFIG_TRACE_IRQFLAGS |
4d732138 IM |
58 | bt $9, EFLAGS(%rsp) /* interrupts off? */ |
59 | jnc 1f | |
2601e64d IM |
60 | TRACE_IRQS_ON |
61 | 1: | |
62 | #endif | |
63 | .endm | |
64 | ||
5963e317 SR |
65 | /* |
66 | * When dynamic function tracer is enabled it will add a breakpoint | |
67 | * to all locations that it is about to modify, sync CPUs, update | |
68 | * all the code, sync CPUs, then remove the breakpoints. In this time | |
69 | * if lockdep is enabled, it might jump back into the debug handler | |
70 | * outside the updating of the IST protection (TRACE_IRQS_ON/OFF). | |
71 | * | |
72 | * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to | |
73 | * make sure the stack pointer does not get reset back to the top | |
74 | * of the debug stack, and instead just reuses the current stack. | |
75 | */ | |
76 | #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS) | |
77 | ||
78 | .macro TRACE_IRQS_OFF_DEBUG | |
4d732138 | 79 | call debug_stack_set_zero |
5963e317 | 80 | TRACE_IRQS_OFF |
4d732138 | 81 | call debug_stack_reset |
5963e317 SR |
82 | .endm |
83 | ||
84 | .macro TRACE_IRQS_ON_DEBUG | |
4d732138 | 85 | call debug_stack_set_zero |
5963e317 | 86 | TRACE_IRQS_ON |
4d732138 | 87 | call debug_stack_reset |
5963e317 SR |
88 | .endm |
89 | ||
f2db9382 | 90 | .macro TRACE_IRQS_IRETQ_DEBUG |
4d732138 IM |
91 | bt $9, EFLAGS(%rsp) /* interrupts off? */ |
92 | jnc 1f | |
5963e317 SR |
93 | TRACE_IRQS_ON_DEBUG |
94 | 1: | |
95 | .endm | |
96 | ||
97 | #else | |
4d732138 IM |
98 | # define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF |
99 | # define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON | |
100 | # define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ | |
5963e317 SR |
101 | #endif |
102 | ||
1da177e4 | 103 | /* |
4d732138 | 104 | * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers. |
1da177e4 | 105 | * |
fda57b22 AL |
106 | * This is the only entry point used for 64-bit system calls. The |
107 | * hardware interface is reasonably well designed and the register to | |
108 | * argument mapping Linux uses fits well with the registers that are | |
109 | * available when SYSCALL is used. | |
110 | * | |
111 | * SYSCALL instructions can be found inlined in libc implementations as | |
112 | * well as some other programs and libraries. There are also a handful | |
113 | * of SYSCALL instructions in the vDSO used, for example, as a | |
114 | * fallback for clock_gettime() and gettimeofday(). |
115 | * | |
4d732138 | 116 | * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11, |
b87cf63e DV |
117 | * then loads new ss, cs, and rip from previously programmed MSRs. |
118 | * rflags gets masked by a value from another MSR (so CLD and CLAC | |
119 | * are not needed). SYSCALL does not save anything on the stack | |
120 | * and does not change rsp. | |
121 | * | |
122 | * Registers on entry: | |
1da177e4 | 123 | * rax system call number |
b87cf63e DV |
124 | * rcx return address |
125 | * r11 saved rflags (note: r11 is a callee-clobbered register in the C ABI) | |
1da177e4 | 126 | * rdi arg0 |
1da177e4 | 127 | * rsi arg1 |
0bd7b798 | 128 | * rdx arg2 |
b87cf63e | 129 | * r10 arg3 (needs to be moved to rcx to conform to C ABI) |
1da177e4 LT |
130 | * r8 arg4 |
131 | * r9 arg5 | |
4d732138 | 132 | * (note: r12-r15, rbp, rbx are callee-preserved in C ABI) |
0bd7b798 | 133 | * |
1da177e4 LT |
134 | * Only called from user space. |
135 | * | |
7fcb3bc3 | 136 | * When the user can change pt_regs->foo, always force IRET. That is because |
7bf36bbc AK |
137 | * it deals with uncanonical addresses better. SYSRET has trouble |
138 | * with them due to bugs in both AMD and Intel CPUs. | |
0bd7b798 | 139 | */ |
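To illustrate the register convention listed above from the user side (this snippet is userspace code, not part of entry_64.S; `msg` and `len` stand for a buffer and its length defined elsewhere), a direct write(2) made with the SYSCALL instruction looks roughly like:

	movq	$1, %rax		/* __NR_write on x86-64 */
	movq	$1, %rdi		/* arg0: fd (stdout) */
	leaq	msg(%rip), %rsi		/* arg1: buffer */
	movq	$len, %rdx		/* arg2: count */
	syscall				/* rcx := return RIP, r11 := RFLAGS, rax := return value */

Had a fourth argument been needed, it would go in %r10 rather than %rcx, precisely because SYSCALL overwrites %rcx with the return RIP.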
1da177e4 | 140 | |
3386bc8a AL |
141 | .pushsection .entry_trampoline, "ax" |
142 | ||
143 | /* | |
144 | * The code in here gets remapped into cpu_entry_area's trampoline. This means | |
145 | * that the assembler and linker have the wrong idea as to where this code | |
146 | * lives (and, in fact, it's mapped more than once, so it's not even at a | |
147 | * fixed address). So we can't reference any symbols outside the entry | |
148 | * trampoline and expect it to work. | |
149 | * | |
150 | * Instead, we carefully abuse %rip-relative addressing. | |
151 | * _entry_trampoline(%rip) refers to the start of the remapped entry | |
152 | * trampoline. We can thus find cpu_entry_area with this macro: | |
153 | */ | |
154 | ||
155 | #define CPU_ENTRY_AREA \ | |
156 | _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) | |
157 | ||
158 | /* The top word of the SYSENTER stack is hot and is usable as scratch space. */ | |
4fe2d8b1 DH |
159 | #define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \ |
160 | SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA | |
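To make the %rip-relative trick concrete, here is a sketch of how a use of these macros expands and why it yields the per-CPU cpu_entry_area:

	/*
	 *   movq %rsp, RSP_SCRATCH   expands to roughly:
	 *
	 *   movq %rsp, CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack - 8
	 *              + _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
	 *
	 * Because this code executes from the remapped copy of the trampoline,
	 * _entry_trampoline(%rip) resolves to the remapped trampoline start,
	 * i.e. cpu_entry_area + CPU_ENTRY_AREA_entry_trampoline.  Subtracting
	 * CPU_ENTRY_AREA_entry_trampoline then leaves this CPU's cpu_entry_area
	 * base, and the remaining constants select the top word of the entry
	 * stack.
	 */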
3386bc8a AL |
161 | |
162 | ENTRY(entry_SYSCALL_64_trampoline) | |
163 | UNWIND_HINT_EMPTY | |
164 | swapgs | |
165 | ||
166 | /* Stash the user RSP. */ | |
167 | movq %rsp, RSP_SCRATCH | |
168 | ||
8a09317b DH |
169 | /* Note: using %rsp as a scratch reg. */ |
170 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp | |
171 | ||
3386bc8a AL |
172 | /* Load the top of the task stack into RSP */ |
173 | movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp | |
174 | ||
175 | /* Start building the simulated IRET frame. */ | |
176 | pushq $__USER_DS /* pt_regs->ss */ | |
177 | pushq RSP_SCRATCH /* pt_regs->sp */ | |
178 | pushq %r11 /* pt_regs->flags */ | |
179 | pushq $__USER_CS /* pt_regs->cs */ | |
180 | pushq %rcx /* pt_regs->ip */ | |
181 | ||
182 | /* | |
183 | * x86 lacks a near absolute jump, and we can't jump to the real | |
184 | * entry text with a relative jump. We could push the target | |
185 | * address and then use retq, but this destroys the pipeline on | |
186 | * many CPUs (wasting over 20 cycles on Sandy Bridge). Instead, | |
187 | * spill RDI and restore it in a second-stage trampoline. | |
188 | */ | |
189 | pushq %rdi | |
190 | movq $entry_SYSCALL_64_stage2, %rdi | |
2641f08b | 191 | JMP_NOSPEC %rdi |
3386bc8a AL |
192 | END(entry_SYSCALL_64_trampoline) |
193 | ||
194 | .popsection | |
195 | ||
196 | ENTRY(entry_SYSCALL_64_stage2) | |
197 | UNWIND_HINT_EMPTY | |
198 | popq %rdi | |
199 | jmp entry_SYSCALL_64_after_hwframe | |
200 | END(entry_SYSCALL_64_stage2) | |
201 | ||
b2502b41 | 202 | ENTRY(entry_SYSCALL_64) |
8c1f7558 | 203 | UNWIND_HINT_EMPTY |
9ed8e7d8 DV |
204 | /* |
205 | * Interrupts are off on entry. | |
206 | * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, | |
207 | * it is too small to ever cause noticeable irq latency. | |
208 | */ | |
72fe4858 | 209 | |
8a9949bc | 210 | swapgs |
8a09317b DH |
211 | /* |
212 | * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it | |
213 | * is not required to switch CR3. | |
214 | */ | |
4d732138 IM |
215 | movq %rsp, PER_CPU_VAR(rsp_scratch) |
216 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | |
9ed8e7d8 | 217 | |
1e423bff AL |
218 | TRACE_IRQS_OFF |
219 | ||
9ed8e7d8 | 220 | /* Construct struct pt_regs on stack */ |
4d732138 IM |
221 | pushq $__USER_DS /* pt_regs->ss */ |
222 | pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */ | |
4d732138 IM |
223 | pushq %r11 /* pt_regs->flags */ |
224 | pushq $__USER_CS /* pt_regs->cs */ | |
225 | pushq %rcx /* pt_regs->ip */ | |
8a9949bc | 226 | GLOBAL(entry_SYSCALL_64_after_hwframe) |
4d732138 IM |
227 | pushq %rax /* pt_regs->orig_ax */ |
228 | pushq %rdi /* pt_regs->di */ | |
229 | pushq %rsi /* pt_regs->si */ | |
230 | pushq %rdx /* pt_regs->dx */ | |
231 | pushq %rcx /* pt_regs->cx */ | |
232 | pushq $-ENOSYS /* pt_regs->ax */ | |
233 | pushq %r8 /* pt_regs->r8 */ | |
234 | pushq %r9 /* pt_regs->r9 */ | |
235 | pushq %r10 /* pt_regs->r10 */ | |
236 | pushq %r11 /* pt_regs->r11 */ | |
237 | sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */ | |
8c1f7558 | 238 | UNWIND_HINT_REGS extra=0 |
4d732138 | 239 | |
1e423bff AL |
240 | /* |
241 | * If we need to do entry work or if we guess we'll need to do | |
242 | * exit work, go straight to the slow path. | |
243 | */ | |
15f4eae7 AL |
244 | movq PER_CPU_VAR(current_task), %r11 |
245 | testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) | |
1e423bff AL |
246 | jnz entry_SYSCALL64_slow_path |
247 | ||
b2502b41 | 248 | entry_SYSCALL_64_fastpath: |
1e423bff AL |
249 | /* |
250 | * Easy case: enable interrupts and issue the syscall. If the syscall | |
251 | * needs pt_regs, we'll call a stub that disables interrupts again | |
252 | * and jumps to the slow path. | |
253 | */ | |
254 | TRACE_IRQS_ON | |
255 | ENABLE_INTERRUPTS(CLBR_NONE) | |
fca460f9 | 256 | #if __SYSCALL_MASK == ~0 |
4d732138 | 257 | cmpq $__NR_syscall_max, %rax |
fca460f9 | 258 | #else |
4d732138 IM |
259 | andl $__SYSCALL_MASK, %eax |
260 | cmpl $__NR_syscall_max, %eax | |
fca460f9 | 261 | #endif |
4d732138 IM |
262 | ja 1f /* return -ENOSYS (already in pt_regs->ax) */ |
263 | movq %r10, %rcx | |
302f5b26 AL |
264 | |
265 | /* | |
266 | * This call instruction is handled specially in stub_ptregs_64. | |
b7765086 AL |
267 | * It might end up jumping to the slow path. If it jumps, RAX |
268 | * and all argument registers are clobbered. | |
302f5b26 | 269 | */ |
2641f08b DW |
270 | #ifdef CONFIG_RETPOLINE |
271 | movq sys_call_table(, %rax, 8), %rax | |
272 | call __x86_indirect_thunk_rax | |
273 | #else | |
4d732138 | 274 | call *sys_call_table(, %rax, 8) |
2641f08b | 275 | #endif |
302f5b26 AL |
276 | .Lentry_SYSCALL_64_after_fastpath_call: |
277 | ||
4d732138 | 278 | movq %rax, RAX(%rsp) |
146b2b09 | 279 | 1: |
b3494a4a AL |
280 | |
281 | /* | |
1e423bff AL |
282 | * If we get here, then we know that pt_regs is clean for SYSRET64. |
283 | * If we see that no exit work is required (which we are required | |
284 | * to check with IRQs off), then we can go straight to SYSRET64. | |
b3494a4a | 285 | */ |
2140a994 | 286 | DISABLE_INTERRUPTS(CLBR_ANY) |
1e423bff | 287 | TRACE_IRQS_OFF |
15f4eae7 AL |
288 | movq PER_CPU_VAR(current_task), %r11 |
289 | testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11) | |
1e423bff | 290 | jnz 1f |
b3494a4a | 291 | |
1e423bff AL |
292 | LOCKDEP_SYS_EXIT |
293 | TRACE_IRQS_ON /* user mode is traced as IRQs on */ | |
eb2a54c3 AL |
294 | movq RIP(%rsp), %rcx |
295 | movq EFLAGS(%rsp), %r11 | |
a5122106 | 296 | addq $6*8, %rsp /* skip extra regs -- they were preserved */ |
8c1f7558 | 297 | UNWIND_HINT_EMPTY |
a5122106 | 298 | jmp .Lpop_c_regs_except_rcx_r11_and_sysret |
1da177e4 | 299 | |
1e423bff AL |
300 | 1: |
301 | /* | |
302 | * The fast path looked good when we started, but something changed | |
303 | * along the way and we need to switch to the slow path. Calling | |
304 | * raise(3) will trigger this, for example. IRQs are off. | |
305 | */ | |
29ea1b25 | 306 | TRACE_IRQS_ON |
2140a994 | 307 | ENABLE_INTERRUPTS(CLBR_ANY) |
76f5df43 | 308 | SAVE_EXTRA_REGS |
4d732138 | 309 | movq %rsp, %rdi |
1e423bff AL |
310 | call syscall_return_slowpath /* returns with IRQs disabled */ |
311 | jmp return_from_SYSCALL_64 | |
0bd7b798 | 312 | |
1e423bff AL |
313 | entry_SYSCALL64_slow_path: |
314 | /* IRQs are off. */ | |
76f5df43 | 315 | SAVE_EXTRA_REGS |
29ea1b25 | 316 | movq %rsp, %rdi |
1e423bff AL |
317 | call do_syscall_64 /* returns with IRQs disabled */ |
318 | ||
319 | return_from_SYSCALL_64: | |
29ea1b25 | 320 | TRACE_IRQS_IRETQ /* we're about to change IF */ |
fffbb5dc DV |
321 | |
322 | /* | |
323 | * Try to use SYSRET instead of IRET if we're returning to | |
8a055d7f AL |
324 | * a completely clean 64-bit userspace context. If we're not, |
325 | * go to the slow exit path. | |
fffbb5dc | 326 | */ |
4d732138 IM |
327 | movq RCX(%rsp), %rcx |
328 | movq RIP(%rsp), %r11 | |
8a055d7f AL |
329 | |
330 | cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */ | |
331 | jne swapgs_restore_regs_and_return_to_usermode | |
fffbb5dc DV |
332 | |
333 | /* | |
334 | * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP | |
335 | * in kernel space. This essentially lets the user take over | |
17be0aec | 336 | * the kernel, since userspace controls RSP. |
fffbb5dc | 337 | * |
17be0aec | 338 | * If the width of the "canonical tail" ever becomes variable, this will need |
fffbb5dc | 339 | * to be updated to remain correct on both old and new CPUs. |
361b4b58 | 340 | * |
cbe0317b KS |
341 | * Change top bits to match most significant bit (47th or 56th bit |
342 | * depending on paging mode) in the address. | |
fffbb5dc | 343 | */ |
17be0aec DV |
344 | shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx |
345 | sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx | |
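	/*
	 * Worked example of the clamp above (illustration only), assuming
	 * 4-level paging where __VIRTUAL_MASK_SHIFT is 47, so both shifts are
	 * by 16 and bit 47 is replicated into bits 63:48:
	 *
	 *   0x00007fffffffffff  ->  0x00007fffffffffff   (unchanged: canonical)
	 *   0x0000800000000000  ->  0xffff800000000000   (changed: non-canonical,
	 *                            the cmpq below fails and we take the IRET path)
	 */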
4d732138 | 346 | |
17be0aec DV |
347 | /* If this changed %rcx, it was not canonical */ |
348 | cmpq %rcx, %r11 | |
8a055d7f | 349 | jne swapgs_restore_regs_and_return_to_usermode |
fffbb5dc | 350 | |
4d732138 | 351 | cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */ |
8a055d7f | 352 | jne swapgs_restore_regs_and_return_to_usermode |
fffbb5dc | 353 | |
4d732138 IM |
354 | movq R11(%rsp), %r11 |
355 | cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */ | |
8a055d7f | 356 | jne swapgs_restore_regs_and_return_to_usermode |
fffbb5dc DV |
357 | |
358 | /* | |
3e035305 BP |
359 | * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot |
360 | * restore RF properly. If the slowpath sets it for whatever reason, we | |
361 | * need to restore it correctly. | |
362 | * | |
363 | * SYSRET can restore TF, but unlike IRET, restoring TF results in a | |
364 | * trap from userspace immediately after SYSRET. This would cause an | |
365 | * infinite loop whenever #DB happens with register state that satisfies | |
366 | * the opportunistic SYSRET conditions. For example, single-stepping | |
367 | * this user code: | |
fffbb5dc | 368 | * |
4d732138 | 369 | * movq $stuck_here, %rcx |
fffbb5dc DV |
370 | * pushfq |
371 | * popq %r11 | |
372 | * stuck_here: | |
373 | * | |
374 | * would never get past 'stuck_here'. | |
375 | */ | |
4d732138 | 376 | testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 |
8a055d7f | 377 | jnz swapgs_restore_regs_and_return_to_usermode |
fffbb5dc DV |
378 | |
379 | /* nothing to check for RSP */ | |
380 | ||
4d732138 | 381 | cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */ |
8a055d7f | 382 | jne swapgs_restore_regs_and_return_to_usermode |
fffbb5dc DV |
383 | |
384 | /* | |
4d732138 IM |
385 | * We win! This label is here just for ease of understanding |
386 | * perf profiles. Nothing jumps here. | |
fffbb5dc DV |
387 | */ |
388 | syscall_return_via_sysret: | |
17be0aec | 389 | /* rcx and r11 are already restored (see code above) */ |
8c1f7558 | 390 | UNWIND_HINT_EMPTY |
4fbb3910 | 391 | POP_EXTRA_REGS |
a5122106 | 392 | .Lpop_c_regs_except_rcx_r11_and_sysret: |
4fbb3910 AL |
393 | popq %rsi /* skip r11 */ |
394 | popq %r10 | |
395 | popq %r9 | |
396 | popq %r8 | |
397 | popq %rax | |
398 | popq %rsi /* skip rcx */ | |
399 | popq %rdx | |
400 | popq %rsi | |
3e3b9293 AL |
401 | |
402 | /* | |
403 | * Now all regs are restored except RSP and RDI. | |
404 | * Save old stack pointer and switch to trampoline stack. | |
405 | */ | |
406 | movq %rsp, %rdi | |
c482feef | 407 | movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp |
3e3b9293 AL |
408 | |
409 | pushq RSP-RDI(%rdi) /* RSP */ | |
410 | pushq (%rdi) /* RDI */ | |
411 | ||
412 | /* | |
413 | * We are on the trampoline stack. All regs except RDI are live. | |
414 | * We can do future final exit work right here. | |
415 | */ | |
6fd166aa | 416 | SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi |
3e3b9293 | 417 | |
4fbb3910 | 418 | popq %rdi |
3e3b9293 | 419 | popq %rsp |
fffbb5dc | 420 | USERGS_SYSRET64 |
b2502b41 | 421 | END(entry_SYSCALL_64) |
0bd7b798 | 422 | |
302f5b26 AL |
423 | ENTRY(stub_ptregs_64) |
424 | /* | |
425 | * Syscalls marked as needing ptregs land here. | |
b7765086 AL |
426 | * If we are on the fast path, we need to save the extra regs, |
427 | * which we achieve by trying again on the slow path. If we are on | |
428 | * the slow path, the extra regs are already saved. | |
302f5b26 AL |
429 | * |
430 | * RAX stores a pointer to the C function implementing the syscall. | |
b7765086 | 431 | * IRQs are on. |
302f5b26 AL |
432 | */ |
433 | cmpq $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp) | |
434 | jne 1f | |
435 | ||
b7765086 AL |
436 | /* |
437 | * Called from fast path -- disable IRQs again, pop return address | |
438 | * and jump to slow path | |
439 | */ | |
2140a994 | 440 | DISABLE_INTERRUPTS(CLBR_ANY) |
b7765086 | 441 | TRACE_IRQS_OFF |
302f5b26 | 442 | popq %rax |
8c1f7558 | 443 | UNWIND_HINT_REGS extra=0 |
b7765086 | 444 | jmp entry_SYSCALL64_slow_path |
302f5b26 AL |
445 | |
446 | 1: | |
2641f08b | 447 | JMP_NOSPEC %rax /* Called from C */ |
302f5b26 AL |
448 | END(stub_ptregs_64) |
449 | ||
450 | .macro ptregs_stub func | |
451 | ENTRY(ptregs_\func) | |
8c1f7558 | 452 | UNWIND_HINT_FUNC |
302f5b26 AL |
453 | leaq \func(%rip), %rax |
454 | jmp stub_ptregs_64 | |
455 | END(ptregs_\func) | |
456 | .endm | |
457 | ||
458 | /* Instantiate ptregs_stub for each ptregs-using syscall */ | |
459 | #define __SYSCALL_64_QUAL_(sym) | |
460 | #define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym | |
461 | #define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym) | |
462 | #include <asm/syscalls_64.h> | |
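As an example of the expansion (the syscall number and name come from the x86-64 syscall table and are shown only for illustration), a ptregs-marked entry such as fork is emitted by the generated header roughly as follows:

	/*
	 *   __SYSCALL_64(57, sys_fork, ptregs)
	 *     -> __SYSCALL_64_QUAL_ptregs(sys_fork)
	 *     -> ptregs_stub sys_fork
	 *
	 * which instantiates:
	 *
	 *   ENTRY(ptregs_sys_fork)
	 *           UNWIND_HINT_FUNC
	 *           leaq    sys_fork(%rip), %rax
	 *           jmp     stub_ptregs_64
	 *   END(ptregs_sys_fork)
	 *
	 * Entries without the ptregs qualifier expand to nothing here; they are
	 * called directly through sys_call_table.
	 */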
fffbb5dc | 463 | |
0100301b BG |
464 | /* |
465 | * %rdi: prev task | |
466 | * %rsi: next task | |
467 | */ | |
468 | ENTRY(__switch_to_asm) | |
8c1f7558 | 469 | UNWIND_HINT_FUNC |
0100301b BG |
470 | /* |
471 | * Save callee-saved registers | |
472 | * This must match the order in inactive_task_frame | |
473 | */ | |
474 | pushq %rbp | |
475 | pushq %rbx | |
476 | pushq %r12 | |
477 | pushq %r13 | |
478 | pushq %r14 | |
479 | pushq %r15 | |
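	/*
	 * Aside (derived from the pushes above, not extra code): the outgoing
	 * task's stack now looks like this, lowest address (current %rsp) first.
	 * This is the layout that must match struct inactive_task_frame:
	 *
	 *   0x00  r15
	 *   0x08  r14
	 *   0x10  r13
	 *   0x18  r12
	 *   0x20  rbx
	 *   0x28  rbp
	 *   0x30  return address of this __switch_to_asm call
	 *         (ret_from_fork for a newly created task)
	 */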
480 | ||
481 | /* switch stack */ | |
482 | movq %rsp, TASK_threadsp(%rdi) | |
483 | movq TASK_threadsp(%rsi), %rsp | |
484 | ||
485 | #ifdef CONFIG_CC_STACKPROTECTOR | |
486 | movq TASK_stack_canary(%rsi), %rbx | |
487 | movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset | |
488 | #endif | |
489 | ||
c995efd5 DW |
490 | #ifdef CONFIG_RETPOLINE |
491 | /* | |
492 | * When switching from a shallower to a deeper call stack | |
493 | * the RSB may either underflow or use entries populated | |
494 | * with userspace addresses. On CPUs where those concerns | |
495 | * exist, overwrite the RSB with entries which capture | |
496 | * speculative execution to prevent attack. | |
497 | */ | |
498 | FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW | |
499 | #endif | |
500 | ||
0100301b BG |
501 | /* restore callee-saved registers */ |
502 | popq %r15 | |
503 | popq %r14 | |
504 | popq %r13 | |
505 | popq %r12 | |
506 | popq %rbx | |
507 | popq %rbp | |
508 | ||
509 | jmp __switch_to | |
510 | END(__switch_to_asm) | |
511 | ||
1eeb207f DV |
512 | /* |
513 | * A newly forked process directly context switches into this address. | |
514 | * | |
0100301b | 515 | * rax: prev task we switched from |
616d2483 BG |
516 | * rbx: kernel thread func (NULL for user thread) |
517 | * r12: kernel thread arg | |
1eeb207f DV |
518 | */ |
519 | ENTRY(ret_from_fork) | |
8c1f7558 | 520 | UNWIND_HINT_EMPTY |
0100301b | 521 | movq %rax, %rdi |
ebd57499 | 522 | call schedule_tail /* rdi: 'prev' task parameter */ |
1eeb207f | 523 | |
ebd57499 JP |
524 | testq %rbx, %rbx /* from kernel_thread? */ |
525 | jnz 1f /* kernel threads are uncommon */ | |
24d978b7 | 526 | |
616d2483 | 527 | 2: |
8c1f7558 | 528 | UNWIND_HINT_REGS |
ebd57499 | 529 | movq %rsp, %rdi |
24d978b7 AL |
530 | call syscall_return_slowpath /* returns with IRQs disabled */ |
531 | TRACE_IRQS_ON /* user mode is traced as IRQS on */ | |
8a055d7f | 532 | jmp swapgs_restore_regs_and_return_to_usermode |
616d2483 BG |
533 | |
534 | 1: | |
535 | /* kernel thread */ | |
536 | movq %r12, %rdi | |
2641f08b | 537 | CALL_NOSPEC %rbx |
616d2483 BG |
538 | /* |
539 | * A kernel thread is allowed to return here after successfully | |
540 | * calling do_execve(). Exit to userspace to complete the execve() | |
541 | * syscall. | |
542 | */ | |
543 | movq $0, RAX(%rsp) | |
544 | jmp 2b | |
1eeb207f DV |
545 | END(ret_from_fork) |
546 | ||
939b7871 | 547 | /* |
3304c9c3 DV |
548 | * Build the entry stubs with some assembler magic. |
549 | * We pack 1 stub into every 8-byte block. | |
939b7871 | 550 | */ |
3304c9c3 | 551 | .align 8 |
939b7871 | 552 | ENTRY(irq_entries_start) |
3304c9c3 DV |
553 | vector=FIRST_EXTERNAL_VECTOR |
554 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) | |
8c1f7558 | 555 | UNWIND_HINT_IRET_REGS |
4d732138 | 556 | pushq $(~vector+0x80) /* Note: always in signed byte range */ |
3304c9c3 | 557 | jmp common_interrupt |
3304c9c3 | 558 | .align 8 |
8c1f7558 | 559 | vector=vector+1 |
3304c9c3 | 560 | .endr |
939b7871 PA |
561 | END(irq_entries_start) |
562 | ||
1d3e53e8 AL |
563 | .macro DEBUG_ENTRY_ASSERT_IRQS_OFF |
564 | #ifdef CONFIG_DEBUG_ENTRY | |
e17f8234 BO |
565 | pushq %rax |
566 | SAVE_FLAGS(CLBR_RAX) | |
567 | testl $X86_EFLAGS_IF, %eax | |
1d3e53e8 AL |
568 | jz .Lokay_\@ |
569 | ud2 | |
570 | .Lokay_\@: | |
e17f8234 | 571 | popq %rax |
1d3e53e8 AL |
572 | #endif |
573 | .endm | |
574 | ||
575 | /* | |
576 | * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers | |
577 | * flags and puts old RSP into old_rsp, and leaves all other GPRs alone. | |
578 | * Requires kernel GSBASE. | |
579 | * | |
580 | * The invariant is that, if irq_count != -1, then the IRQ stack is in use. | |
581 | */ | |
8c1f7558 | 582 | .macro ENTER_IRQ_STACK regs=1 old_rsp |
1d3e53e8 AL |
583 | DEBUG_ENTRY_ASSERT_IRQS_OFF |
584 | movq %rsp, \old_rsp | |
8c1f7558 JP |
585 | |
586 | .if \regs | |
587 | UNWIND_HINT_REGS base=\old_rsp | |
588 | .endif | |
589 | ||
1d3e53e8 | 590 | incl PER_CPU_VAR(irq_count) |
29955909 | 591 | jnz .Lirq_stack_push_old_rsp_\@ |
1d3e53e8 AL |
592 | |
593 | /* | |
594 | * Right now, if we just incremented irq_count to zero, we've | |
595 | * claimed the IRQ stack but we haven't switched to it yet. | |
596 | * | |
597 | * If anything is added that can interrupt us here without using IST, | |
598 | * it must be *extremely* careful to limit its stack usage. This | |
599 | * could include kprobes and a hypothetical future IST-less #DB | |
600 | * handler. | |
29955909 AL |
601 | * |
602 | * The OOPS unwinder relies on the word at the top of the IRQ | |
603 | * stack linking back to the previous RSP for the entire time we're | |
604 | * on the IRQ stack. For this to work reliably, we need to write | |
605 | * it before we actually move ourselves to the IRQ stack. | |
606 | */ | |
607 | ||
608 | movq \old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8) | |
609 | movq PER_CPU_VAR(irq_stack_ptr), %rsp | |
610 | ||
611 | #ifdef CONFIG_DEBUG_ENTRY | |
612 | /* | |
613 | * If the first movq above becomes wrong due to IRQ stack layout | |
614 | * changes, the only way we'll notice is if we try to unwind right | |
615 | * here. Assert that we set up the stack right to catch this type | |
616 | * of bug quickly. | |
1d3e53e8 | 617 | */ |
29955909 AL |
618 | cmpq -8(%rsp), \old_rsp |
619 | je .Lirq_stack_okay\@ | |
620 | ud2 | |
621 | .Lirq_stack_okay\@: | |
622 | #endif | |
1d3e53e8 | 623 | |
29955909 | 624 | .Lirq_stack_push_old_rsp_\@: |
1d3e53e8 | 625 | pushq \old_rsp |
8c1f7558 JP |
626 | |
627 | .if \regs | |
628 | UNWIND_HINT_REGS indirect=1 | |
629 | .endif | |
1d3e53e8 AL |
630 | .endm |
631 | ||
632 | /* | |
633 | * Undoes ENTER_IRQ_STACK. | |
634 | */ | |
8c1f7558 | 635 | .macro LEAVE_IRQ_STACK regs=1 |
1d3e53e8 AL |
636 | DEBUG_ENTRY_ASSERT_IRQS_OFF |
637 | /* We need to be off the IRQ stack before decrementing irq_count. */ | |
638 | popq %rsp | |
639 | ||
8c1f7558 JP |
640 | .if \regs |
641 | UNWIND_HINT_REGS | |
642 | .endif | |
643 | ||
1d3e53e8 AL |
644 | /* |
645 | * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming | |
646 | * the irq stack but we're not on it. | |
647 | */ | |
648 | ||
649 | decl PER_CPU_VAR(irq_count) | |
650 | .endm | |
651 | ||
d99015b1 | 652 | /* |
1da177e4 LT |
653 | * Interrupt entry/exit. |
654 | * | |
655 | * Interrupt entry points save only callee-clobbered registers in the fast path. | |
d99015b1 AH |
656 | * |
657 | * Entry runs with interrupts off. | |
658 | */ | |
1da177e4 | 659 | |
722024db | 660 | /* 0(%rsp): ~(interrupt number) */ |
1da177e4 | 661 | .macro interrupt func |
f6f64681 | 662 | cld |
7f2590a1 AL |
663 | |
664 | testb $3, CS-ORIG_RAX(%rsp) | |
665 | jz 1f | |
666 | SWAPGS | |
667 | call switch_to_thread_stack | |
668 | 1: | |
669 | ||
ff467594 AL |
670 | ALLOC_PT_GPREGS_ON_STACK |
671 | SAVE_C_REGS | |
672 | SAVE_EXTRA_REGS | |
946c1911 | 673 | ENCODE_FRAME_POINTER |
76f5df43 | 674 | |
ff467594 | 675 | testb $3, CS(%rsp) |
dde74f2e | 676 | jz 1f |
02bc7768 AL |
677 | |
678 | /* | |
7f2590a1 AL |
679 | * IRQ from user mode. |
680 | * | |
f1075053 AL |
681 | * We need to tell lockdep that IRQs are off. We can't do this until |
682 | * we fix gsbase, and we should do it before enter_from_user_mode | |
683 | * (which can take locks). Since TRACE_IRQS_OFF is idempotent, | |
684 | * the simplest way to handle it is to just call it twice if | |
685 | * we enter from user mode. There's no reason to optimize this since | |
686 | * TRACE_IRQS_OFF is a no-op if lockdep is off. | |
687 | */ | |
688 | TRACE_IRQS_OFF | |
689 | ||
478dc89c | 690 | CALL_enter_from_user_mode |
02bc7768 | 691 | |
76f5df43 | 692 | 1: |
1d3e53e8 | 693 | ENTER_IRQ_STACK old_rsp=%rdi |
f6f64681 DV |
694 | /* We entered an interrupt context - irqs are off: */ |
695 | TRACE_IRQS_OFF | |
696 | ||
a586f98e | 697 | call \func /* rdi points to pt_regs */ |
1da177e4 LT |
698 | .endm |
699 | ||
722024db AH |
700 | /* |
701 | * The interrupt stubs push (~vector+0x80) onto the stack and | |
702 | * then jump to common_interrupt. | |
703 | */ | |
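	/*
	 * Worked example of the encoding (illustration only): for vector 0x31,
	 * the stub pushes ~0x31 + 0x80 = 0x4e, which fits in a signed byte and
	 * keeps each stub within its 8-byte slot.  common_interrupt then adds
	 * -0x80, giving 0x4e - 0x80 = -0x32 = ~0x31, and do_IRQ recovers the
	 * vector by complementing orig_ax again: ~(-0x32) = 0x31.
	 */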
939b7871 PA |
704 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
705 | common_interrupt: | |
ee4eb87b | 706 | ASM_CLAC |
4d732138 | 707 | addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ |
1da177e4 | 708 | interrupt do_IRQ |
34061f13 | 709 | /* 0(%rsp): old RSP */ |
7effaa88 | 710 | ret_from_intr: |
2140a994 | 711 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d | 712 | TRACE_IRQS_OFF |
625dbc3b | 713 | |
1d3e53e8 | 714 | LEAVE_IRQ_STACK |
625dbc3b | 715 | |
03335e95 | 716 | testb $3, CS(%rsp) |
dde74f2e | 717 | jz retint_kernel |
4d732138 | 718 | |
02bc7768 | 719 | /* Interrupt came from user space */ |
02bc7768 AL |
720 | GLOBAL(retint_user) |
721 | mov %rsp,%rdi | |
722 | call prepare_exit_to_usermode | |
2601e64d | 723 | TRACE_IRQS_IRETQ |
26c4ef9c | 724 | |
8a055d7f | 725 | GLOBAL(swapgs_restore_regs_and_return_to_usermode) |
26c4ef9c AL |
726 | #ifdef CONFIG_DEBUG_ENTRY |
727 | /* Assert that pt_regs indicates user mode. */ | |
1e4c4f61 | 728 | testb $3, CS(%rsp) |
26c4ef9c AL |
729 | jnz 1f |
730 | ud2 | |
731 | 1: | |
732 | #endif | |
e872045b | 733 | POP_EXTRA_REGS |
3e3b9293 AL |
734 | popq %r11 |
735 | popq %r10 | |
736 | popq %r9 | |
737 | popq %r8 | |
738 | popq %rax | |
739 | popq %rcx | |
740 | popq %rdx | |
741 | popq %rsi | |
742 | ||
743 | /* | |
744 | * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS. | |
745 | * Save old stack pointer and switch to trampoline stack. | |
746 | */ | |
747 | movq %rsp, %rdi | |
c482feef | 748 | movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp |
3e3b9293 AL |
749 | |
750 | /* Copy the IRET frame to the trampoline stack. */ | |
751 | pushq 6*8(%rdi) /* SS */ | |
752 | pushq 5*8(%rdi) /* RSP */ | |
753 | pushq 4*8(%rdi) /* EFLAGS */ | |
754 | pushq 3*8(%rdi) /* CS */ | |
755 | pushq 2*8(%rdi) /* RIP */ | |
756 | ||
757 | /* Push user RDI on the trampoline stack. */ | |
758 | pushq (%rdi) | |
759 | ||
760 | /* | |
761 | * We are on the trampoline stack. All regs except RDI are live. | |
762 | * We can do future final exit work right here. | |
763 | */ | |
764 | ||
6fd166aa | 765 | SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi |
8a09317b | 766 | |
3e3b9293 AL |
767 | /* Restore RDI. */ |
768 | popq %rdi | |
769 | SWAPGS | |
26c4ef9c AL |
770 | INTERRUPT_RETURN |
771 | ||
2601e64d | 772 | |
627276cb | 773 | /* Returning to kernel space */ |
6ba71b76 | 774 | retint_kernel: |
627276cb DV |
775 | #ifdef CONFIG_PREEMPT |
776 | /* Interrupts are off */ | |
777 | /* Check if we need preemption */ | |
4d732138 | 778 | bt $9, EFLAGS(%rsp) /* were interrupts off? */ |
6ba71b76 | 779 | jnc 1f |
4d732138 | 780 | 0: cmpl $0, PER_CPU_VAR(__preempt_count) |
36acef25 | 781 | jnz 1f |
627276cb | 782 | call preempt_schedule_irq |
36acef25 | 783 | jmp 0b |
6ba71b76 | 784 | 1: |
627276cb | 785 | #endif |
2601e64d IM |
786 | /* |
787 | * The iretq could re-enable interrupts: | |
788 | */ | |
789 | TRACE_IRQS_IRETQ | |
fffbb5dc | 790 | |
26c4ef9c AL |
791 | GLOBAL(restore_regs_and_return_to_kernel) |
792 | #ifdef CONFIG_DEBUG_ENTRY | |
793 | /* Assert that pt_regs indicates kernel mode. */ | |
1e4c4f61 | 794 | testb $3, CS(%rsp) |
26c4ef9c AL |
795 | jz 1f |
796 | ud2 | |
797 | 1: | |
798 | #endif | |
e872045b AL |
799 | POP_EXTRA_REGS |
800 | POP_C_REGS | |
801 | addq $8, %rsp /* skip regs->orig_ax */ | |
7209a75d AL |
802 | INTERRUPT_RETURN |
803 | ||
804 | ENTRY(native_iret) | |
8c1f7558 | 805 | UNWIND_HINT_IRET_REGS |
3891a04a PA |
806 | /* |
807 | * Are we returning to a stack segment from the LDT? Note: in | |
808 | * 64-bit mode SS:RSP on the exception stack is always valid. | |
809 | */ | |
34273f41 | 810 | #ifdef CONFIG_X86_ESPFIX64 |
4d732138 IM |
811 | testb $4, (SS-RIP)(%rsp) |
812 | jnz native_irq_return_ldt | |
34273f41 | 813 | #endif |
3891a04a | 814 | |
af726f21 | 815 | .global native_irq_return_iret |
7209a75d | 816 | native_irq_return_iret: |
b645af2d AL |
817 | /* |
818 | * This may fault. Non-paranoid faults on return to userspace are | |
819 | * handled by fixup_bad_iret. These include #SS, #GP, and #NP. | |
820 | * Double-faults due to espfix64 are handled in do_double_fault. | |
821 | * Other faults here are fatal. | |
822 | */ | |
1da177e4 | 823 | iretq |
3701d863 | 824 | |
34273f41 | 825 | #ifdef CONFIG_X86_ESPFIX64 |
7209a75d | 826 | native_irq_return_ldt: |
85063fac AL |
827 | /* |
828 | * We are running with user GSBASE. All GPRs contain their user | |
829 | * values. We have a percpu ESPFIX stack that is eight slots | |
830 | * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom | |
831 | * of the ESPFIX stack. | |
832 | * | |
833 | * We clobber RAX and RDI in this code. We stash RDI on the | |
834 | * normal stack and RAX on the ESPFIX stack. | |
835 | * | |
836 | * The ESPFIX stack layout we set up looks like this: | |
837 | * | |
838 | * --- top of ESPFIX stack --- | |
839 | * SS | |
840 | * RSP | |
841 | * RFLAGS | |
842 | * CS | |
843 | * RIP <-- RSP points here when we're done | |
844 | * RAX <-- espfix_waddr points here | |
845 | * --- bottom of ESPFIX stack --- | |
846 | */ | |
847 | ||
848 | pushq %rdi /* Stash user RDI */ | |
8a09317b DH |
849 | SWAPGS /* to kernel GS */ |
850 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ | |
851 | ||
4d732138 | 852 | movq PER_CPU_VAR(espfix_waddr), %rdi |
85063fac AL |
853 | movq %rax, (0*8)(%rdi) /* user RAX */ |
854 | movq (1*8)(%rsp), %rax /* user RIP */ | |
4d732138 | 855 | movq %rax, (1*8)(%rdi) |
85063fac | 856 | movq (2*8)(%rsp), %rax /* user CS */ |
4d732138 | 857 | movq %rax, (2*8)(%rdi) |
85063fac | 858 | movq (3*8)(%rsp), %rax /* user RFLAGS */ |
4d732138 | 859 | movq %rax, (3*8)(%rdi) |
85063fac | 860 | movq (5*8)(%rsp), %rax /* user SS */ |
4d732138 | 861 | movq %rax, (5*8)(%rdi) |
85063fac | 862 | movq (4*8)(%rsp), %rax /* user RSP */ |
4d732138 | 863 | movq %rax, (4*8)(%rdi) |
85063fac AL |
864 | /* Now RAX == RSP. */ |
865 | ||
866 | andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ | |
85063fac AL |
867 | |
868 | /* | |
869 | * espfix_stack[31:16] == 0. The page tables are set up such that | |
870 | * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of | |
871 | * espfix_waddr for any X. That is, there are 65536 RO aliases of | |
872 | * the same page. Set up RSP so that RSP[31:16] contains the | |
873 | * respective 16 bits of the /userspace/ RSP and RSP nonetheless | |
874 | * still points to an RO alias of the ESPFIX stack. | |
875 | */ | |
4d732138 | 876 | orq PER_CPU_VAR(espfix_stack), %rax |
8a09317b | 877 | |
6fd166aa | 878 | SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi |
8a09317b DH |
879 | SWAPGS /* to user GS */ |
880 | popq %rdi /* Restore user RDI */ | |
881 | ||
4d732138 | 882 | movq %rax, %rsp |
8c1f7558 | 883 | UNWIND_HINT_IRET_REGS offset=8 |
85063fac AL |
884 | |
885 | /* | |
886 | * At this point, we cannot write to the stack any more, but we can | |
887 | * still read. | |
888 | */ | |
889 | popq %rax /* Restore user RAX */ | |
890 | ||
891 | /* | |
892 | * RSP now points to an ordinary IRET frame, except that the page | |
893 | * is read-only and RSP[31:16] are preloaded with the userspace | |
894 | * values. We can now IRET back to userspace. | |
895 | */ | |
4d732138 | 896 | jmp native_irq_return_iret |
34273f41 | 897 | #endif |
4b787e0b | 898 | END(common_interrupt) |
3891a04a | 899 | |
1da177e4 LT |
900 | /* |
901 | * APIC interrupts. | |
0bd7b798 | 902 | */ |
cf910e83 | 903 | .macro apicinterrupt3 num sym do_sym |
322648d1 | 904 | ENTRY(\sym) |
8c1f7558 | 905 | UNWIND_HINT_IRET_REGS |
ee4eb87b | 906 | ASM_CLAC |
4d732138 | 907 | pushq $~(\num) |
39e95433 | 908 | .Lcommon_\sym: |
322648d1 | 909 | interrupt \do_sym |
4d732138 | 910 | jmp ret_from_intr |
322648d1 AH |
911 | END(\sym) |
912 | .endm | |
1da177e4 | 913 | |
469f0023 | 914 | /* Make sure APIC interrupt handlers end up in the irqentry section: */ |
229a7186 MH |
915 | #define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax" |
916 | #define POP_SECTION_IRQENTRY .popsection | |
469f0023 | 917 | |
cf910e83 | 918 | .macro apicinterrupt num sym do_sym |
469f0023 | 919 | PUSH_SECTION_IRQENTRY |
cf910e83 | 920 | apicinterrupt3 \num \sym \do_sym |
469f0023 | 921 | POP_SECTION_IRQENTRY |
cf910e83 SA |
922 | .endm |
923 | ||
322648d1 | 924 | #ifdef CONFIG_SMP |
4d732138 IM |
925 | apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
926 | apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt | |
322648d1 | 927 | #endif |
1da177e4 | 928 | |
03b48632 | 929 | #ifdef CONFIG_X86_UV |
4d732138 | 930 | apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt |
03b48632 | 931 | #endif |
4d732138 IM |
932 | |
933 | apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt | |
934 | apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi | |
89b831ef | 935 | |
d78f2664 | 936 | #ifdef CONFIG_HAVE_KVM |
4d732138 IM |
937 | apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi |
938 | apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi | |
210f84b0 | 939 | apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi |
d78f2664 YZ |
940 | #endif |
941 | ||
33e5ff63 | 942 | #ifdef CONFIG_X86_MCE_THRESHOLD |
4d732138 | 943 | apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt |
33e5ff63 SA |
944 | #endif |
945 | ||
24fd78a8 | 946 | #ifdef CONFIG_X86_MCE_AMD |
4d732138 | 947 | apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt |
24fd78a8 AG |
948 | #endif |
949 | ||
33e5ff63 | 950 | #ifdef CONFIG_X86_THERMAL_VECTOR |
4d732138 | 951 | apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt |
33e5ff63 | 952 | #endif |
1812924b | 953 | |
322648d1 | 954 | #ifdef CONFIG_SMP |
4d732138 IM |
955 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt |
956 | apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt | |
957 | apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt | |
322648d1 | 958 | #endif |
1da177e4 | 959 | |
4d732138 IM |
960 | apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt |
961 | apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt | |
0bd7b798 | 962 | |
e360adbe | 963 | #ifdef CONFIG_IRQ_WORK |
4d732138 | 964 | apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt |
241771ef IM |
965 | #endif |
966 | ||
1da177e4 LT |
967 | /* |
968 | * Exception entry points. | |
0bd7b798 | 969 | */ |
c482feef | 970 | #define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8) |
577ed45e | 971 | |
7f2590a1 AL |
972 | /* |
973 | * Switch to the thread stack. This is called with the IRET frame and | |
974 | * orig_ax on the stack. (That is, RDI..R12 are not on the stack and | |
975 | * space has not been allocated for them.) | |
976 | */ | |
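	/*
	 * For reference (derived from the offsets used below): after the
	 * "pushq %rdi" at the top of this function, %rdi is loaded with the old
	 * RSP and points at:
	 *
	 *   0x00  saved RDI
	 *   0x08  return address of the call to switch_to_thread_stack
	 *   0x10  orig_ax
	 *   0x18  RIP
	 *   0x20  CS
	 *   0x28  EFLAGS
	 *   0x30  RSP
	 *   0x38  SS
	 */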
977 | ENTRY(switch_to_thread_stack) | |
978 | UNWIND_HINT_FUNC | |
979 | ||
980 | pushq %rdi | |
8a09317b DH |
981 | /* Need to switch before accessing the thread stack. */ |
982 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | |
7f2590a1 AL |
983 | movq %rsp, %rdi |
984 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | |
985 | UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI | |
986 | ||
987 | pushq 7*8(%rdi) /* regs->ss */ | |
988 | pushq 6*8(%rdi) /* regs->rsp */ | |
989 | pushq 5*8(%rdi) /* regs->eflags */ | |
990 | pushq 4*8(%rdi) /* regs->cs */ | |
991 | pushq 3*8(%rdi) /* regs->ip */ | |
992 | pushq 2*8(%rdi) /* regs->orig_ax */ | |
993 | pushq 8(%rdi) /* return address */ | |
994 | UNWIND_HINT_FUNC | |
995 | ||
996 | movq (%rdi), %rdi | |
997 | ret | |
998 | END(switch_to_thread_stack) | |
999 | ||
577ed45e | 1000 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 |
322648d1 | 1001 | ENTRY(\sym) |
98990a33 | 1002 | UNWIND_HINT_IRET_REGS offset=\has_error_code*8 |
8c1f7558 | 1003 | |
577ed45e AL |
1004 | /* Sanity check */ |
1005 | .if \shift_ist != -1 && \paranoid == 0 | |
1006 | .error "using shift_ist requires paranoid=1" | |
1007 | .endif | |
1008 | ||
ee4eb87b | 1009 | ASM_CLAC |
cb5dd2c5 | 1010 | |
82c62fa0 | 1011 | .if \has_error_code == 0 |
4d732138 | 1012 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
cb5dd2c5 AL |
1013 | .endif |
1014 | ||
76f5df43 | 1015 | ALLOC_PT_GPREGS_ON_STACK |
cb5dd2c5 | 1016 | |
7f2590a1 | 1017 | .if \paranoid < 2 |
4d732138 | 1018 | testb $3, CS(%rsp) /* If coming from userspace, switch stacks */ |
7f2590a1 | 1019 | jnz .Lfrom_usermode_switch_stack_\@ |
48e08d0f | 1020 | .endif |
7f2590a1 AL |
1021 | |
1022 | .if \paranoid | |
4d732138 | 1023 | call paranoid_entry |
cb5dd2c5 | 1024 | .else |
4d732138 | 1025 | call error_entry |
cb5dd2c5 | 1026 | .endif |
8c1f7558 | 1027 | UNWIND_HINT_REGS |
ebfc453e | 1028 | /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */ |
cb5dd2c5 | 1029 | |
cb5dd2c5 | 1030 | .if \paranoid |
577ed45e | 1031 | .if \shift_ist != -1 |
4d732138 | 1032 | TRACE_IRQS_OFF_DEBUG /* reload IDT in case of recursion */ |
577ed45e | 1033 | .else |
b8b1d08b | 1034 | TRACE_IRQS_OFF |
cb5dd2c5 | 1035 | .endif |
577ed45e | 1036 | .endif |
cb5dd2c5 | 1037 | |
4d732138 | 1038 | movq %rsp, %rdi /* pt_regs pointer */ |
cb5dd2c5 AL |
1039 | |
1040 | .if \has_error_code | |
4d732138 IM |
1041 | movq ORIG_RAX(%rsp), %rsi /* get error code */ |
1042 | movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ | |
cb5dd2c5 | 1043 | .else |
4d732138 | 1044 | xorl %esi, %esi /* no error code */ |
cb5dd2c5 AL |
1045 | .endif |
1046 | ||
577ed45e | 1047 | .if \shift_ist != -1 |
4d732138 | 1048 | subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) |
577ed45e AL |
1049 | .endif |
1050 | ||
4d732138 | 1051 | call \do_sym |
cb5dd2c5 | 1052 | |
577ed45e | 1053 | .if \shift_ist != -1 |
4d732138 | 1054 | addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist) |
577ed45e AL |
1055 | .endif |
1056 | ||
ebfc453e | 1057 | /* these procedures expect "no swapgs" flag in ebx */ |
cb5dd2c5 | 1058 | .if \paranoid |
4d732138 | 1059 | jmp paranoid_exit |
cb5dd2c5 | 1060 | .else |
4d732138 | 1061 | jmp error_exit |
cb5dd2c5 AL |
1062 | .endif |
1063 | ||
7f2590a1 | 1064 | .if \paranoid < 2 |
48e08d0f | 1065 | /* |
7f2590a1 | 1066 | * Entry from userspace. Switch stacks and treat it |
48e08d0f AL |
1067 | * as a normal entry. This means that paranoid handlers |
1068 | * run in real process context if user_mode(regs). | |
1069 | */ | |
7f2590a1 | 1070 | .Lfrom_usermode_switch_stack_\@: |
4d732138 | 1071 | call error_entry |
48e08d0f | 1072 | |
4d732138 | 1073 | movq %rsp, %rdi /* pt_regs pointer */ |
48e08d0f AL |
1074 | |
1075 | .if \has_error_code | |
4d732138 IM |
1076 | movq ORIG_RAX(%rsp), %rsi /* get error code */ |
1077 | movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */ | |
48e08d0f | 1078 | .else |
4d732138 | 1079 | xorl %esi, %esi /* no error code */ |
48e08d0f AL |
1080 | .endif |
1081 | ||
4d732138 | 1082 | call \do_sym |
48e08d0f | 1083 | |
4d732138 | 1084 | jmp error_exit /* %ebx: no swapgs flag */ |
48e08d0f | 1085 | .endif |
ddeb8f21 | 1086 | END(\sym) |
322648d1 | 1087 | .endm |
b8b1d08b | 1088 | |
4d732138 IM |
1089 | idtentry divide_error do_divide_error has_error_code=0 |
1090 | idtentry overflow do_overflow has_error_code=0 | |
1091 | idtentry bounds do_bounds has_error_code=0 | |
1092 | idtentry invalid_op do_invalid_op has_error_code=0 | |
1093 | idtentry device_not_available do_device_not_available has_error_code=0 | |
1094 | idtentry double_fault do_double_fault has_error_code=1 paranoid=2 | |
1095 | idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 | |
1096 | idtentry invalid_TSS do_invalid_TSS has_error_code=1 | |
1097 | idtentry segment_not_present do_segment_not_present has_error_code=1 | |
1098 | idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0 | |
1099 | idtentry coprocessor_error do_coprocessor_error has_error_code=0 | |
1100 | idtentry alignment_check do_alignment_check has_error_code=1 | |
1101 | idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0 | |
1102 | ||
1103 | ||
1104 | /* | |
1105 | * Reload gs selector with exception handling | |
1106 | * edi: new selector | |
1107 | */ | |
9f9d489a | 1108 | ENTRY(native_load_gs_index) |
8c1f7558 | 1109 | FRAME_BEGIN |
131484c8 | 1110 | pushfq |
b8aa287f | 1111 | DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
9f1e87ea | 1112 | SWAPGS |
42c748bb | 1113 | .Lgs_change: |
4d732138 | 1114 | movl %edi, %gs |
96e5d28a | 1115 | 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE |
72fe4858 | 1116 | SWAPGS |
131484c8 | 1117 | popfq |
8c1f7558 | 1118 | FRAME_END |
9f1e87ea | 1119 | ret |
8c1f7558 | 1120 | ENDPROC(native_load_gs_index) |
784d5699 | 1121 | EXPORT_SYMBOL(native_load_gs_index) |
0bd7b798 | 1122 | |
42c748bb | 1123 | _ASM_EXTABLE(.Lgs_change, bad_gs) |
4d732138 | 1124 | .section .fixup, "ax" |
1da177e4 | 1125 | /* running with kernelgs */ |
0bd7b798 | 1126 | bad_gs: |
4d732138 | 1127 | SWAPGS /* switch back to user gs */ |
b038c842 AL |
1128 | .macro ZAP_GS |
1129 | /* This can't be a string because the preprocessor needs to see it. */ | |
1130 | movl $__USER_DS, %eax | |
1131 | movl %eax, %gs | |
1132 | .endm | |
1133 | ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG | |
4d732138 IM |
1134 | xorl %eax, %eax |
1135 | movl %eax, %gs | |
1136 | jmp 2b | |
9f1e87ea | 1137 | .previous |
0bd7b798 | 1138 | |
2699500b | 1139 | /* Call softirq on interrupt stack. Interrupts are off. */ |
7d65f4a6 | 1140 | ENTRY(do_softirq_own_stack) |
4d732138 IM |
1141 | pushq %rbp |
1142 | mov %rsp, %rbp | |
8c1f7558 | 1143 | ENTER_IRQ_STACK regs=0 old_rsp=%r11 |
4d732138 | 1144 | call __do_softirq |
8c1f7558 | 1145 | LEAVE_IRQ_STACK regs=0 |
2699500b | 1146 | leaveq |
ed6b676c | 1147 | ret |
8c1f7558 | 1148 | ENDPROC(do_softirq_own_stack) |
75154f40 | 1149 | |
3d75e1b8 | 1150 | #ifdef CONFIG_XEN |
5878d5d6 | 1151 | idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0 |
3d75e1b8 JF |
1152 | |
1153 | /* | |
9f1e87ea CG |
1154 | * A note on the "critical region" in our callback handler. |
1155 | * We want to avoid stacking callback handlers due to events occurring | |
1156 | * during handling of the last event. To do this, we keep events disabled | |
1157 | * until we've done all processing. HOWEVER, we must enable events before | |
1158 | * popping the stack frame (can't be done atomically) and so it would still | |
1159 | * be possible to get enough handler activations to overflow the stack. | |
1160 | * Although unlikely, bugs of that kind are hard to track down, so we'd | |
1161 | * like to avoid the possibility. | |
1162 | * So, on entry to the handler we detect whether we interrupted an | |
1163 | * existing activation in its critical region -- if so, we pop the current | |
1164 | * activation and restart the handler using the previous one. | |
1165 | */ | |
4d732138 IM |
1166 | ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */ |
1167 | ||
9f1e87ea CG |
1168 | /* |
1169 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will |
1170 | * see the correct pointer to the pt_regs | |
1171 | */ | |
8c1f7558 | 1172 | UNWIND_HINT_FUNC |
4d732138 | 1173 | movq %rdi, %rsp /* we don't return, adjust the stack frame */ |
8c1f7558 | 1174 | UNWIND_HINT_REGS |
1d3e53e8 AL |
1175 | |
1176 | ENTER_IRQ_STACK old_rsp=%r10 | |
4d732138 | 1177 | call xen_evtchn_do_upcall |
1d3e53e8 AL |
1178 | LEAVE_IRQ_STACK |
1179 | ||
fdfd811d | 1180 | #ifndef CONFIG_PREEMPT |
4d732138 | 1181 | call xen_maybe_preempt_hcall |
fdfd811d | 1182 | #endif |
4d732138 | 1183 | jmp error_exit |
371c394a | 1184 | END(xen_do_hypervisor_callback) |
3d75e1b8 JF |
1185 | |
1186 | /* | |
9f1e87ea CG |
1187 | * Hypervisor uses this for application faults while it executes. |
1188 | * We get here for two reasons: | |
1189 | * 1. Fault while reloading DS, ES, FS or GS | |
1190 | * 2. Fault while executing IRET | |
1191 | * Category 1 we do not need to fix up as Xen has already reloaded all segment | |
1192 | * registers that could be reloaded and zeroed the others. | |
1193 | * Category 2 we fix up by killing the current process. We cannot use the | |
1194 | * normal Linux return path in this case because if we use the IRET hypercall | |
1195 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
1196 | * We distinguish between categories by comparing each saved segment register | |
1197 | * with its current contents: any discrepancy means we are in category 1. | |
1198 | */ | |
3d75e1b8 | 1199 | ENTRY(xen_failsafe_callback) |
8c1f7558 | 1200 | UNWIND_HINT_EMPTY |
4d732138 IM |
1201 | movl %ds, %ecx |
1202 | cmpw %cx, 0x10(%rsp) | |
1203 | jne 1f | |
1204 | movl %es, %ecx | |
1205 | cmpw %cx, 0x18(%rsp) | |
1206 | jne 1f | |
1207 | movl %fs, %ecx | |
1208 | cmpw %cx, 0x20(%rsp) | |
1209 | jne 1f | |
1210 | movl %gs, %ecx | |
1211 | cmpw %cx, 0x28(%rsp) | |
1212 | jne 1f | |
3d75e1b8 | 1213 | /* All segments match their saved values => Category 2 (Bad IRET). */ |
4d732138 IM |
1214 | movq (%rsp), %rcx |
1215 | movq 8(%rsp), %r11 | |
1216 | addq $0x30, %rsp | |
1217 | pushq $0 /* RIP */ | |
8c1f7558 | 1218 | UNWIND_HINT_IRET_REGS offset=8 |
4d732138 | 1219 | jmp general_protection |
3d75e1b8 | 1220 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ |
4d732138 IM |
1221 | movq (%rsp), %rcx |
1222 | movq 8(%rsp), %r11 | |
1223 | addq $0x30, %rsp | |
8c1f7558 | 1224 | UNWIND_HINT_IRET_REGS |
4d732138 | 1225 | pushq $-1 /* orig_ax = -1 => not a system call */ |
76f5df43 DV |
1226 | ALLOC_PT_GPREGS_ON_STACK |
1227 | SAVE_C_REGS | |
1228 | SAVE_EXTRA_REGS | |
946c1911 | 1229 | ENCODE_FRAME_POINTER |
4d732138 | 1230 | jmp error_exit |
3d75e1b8 JF |
1231 | END(xen_failsafe_callback) |
1232 | ||
cf910e83 | 1233 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
38e20b07 SY |
1234 | xen_hvm_callback_vector xen_evtchn_do_upcall |
1235 | ||
3d75e1b8 | 1236 | #endif /* CONFIG_XEN */ |
ddeb8f21 | 1237 | |
bc2b0331 | 1238 | #if IS_ENABLED(CONFIG_HYPERV) |
cf910e83 | 1239 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
bc2b0331 S |
1240 | hyperv_callback_vector hyperv_vector_handler |
1241 | #endif /* CONFIG_HYPERV */ | |
1242 | ||
4d732138 IM |
1243 | idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
1244 | idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK | |
1245 | idtentry stack_segment do_stack_segment has_error_code=1 | |
1246 | ||
6cac5a92 | 1247 | #ifdef CONFIG_XEN |
43e41110 | 1248 | idtentry xennmi do_nmi has_error_code=0 |
5878d5d6 JG |
1249 | idtentry xendebug do_debug has_error_code=0 |
1250 | idtentry xenint3 do_int3 has_error_code=0 | |
6cac5a92 | 1251 | #endif |
4d732138 IM |
1252 | |
1253 | idtentry general_protection do_general_protection has_error_code=1 | |
11a7ffb0 | 1254 | idtentry page_fault do_page_fault has_error_code=1 |
4d732138 | 1255 | |
631bc487 | 1256 | #ifdef CONFIG_KVM_GUEST |
4d732138 | 1257 | idtentry async_page_fault do_async_page_fault has_error_code=1 |
631bc487 | 1258 | #endif |
4d732138 | 1259 | |
ddeb8f21 | 1260 | #ifdef CONFIG_X86_MCE |
4d732138 | 1261 | idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip) |
ddeb8f21 AH |
1262 | #endif |
1263 | ||
ebfc453e DV |
1264 | /* |
1265 | * Save all registers in pt_regs, and switch gs if needed. | |
1266 | * Use slow, but surefire "are we in kernel?" check. | |
1267 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise | |
1268 | */ | |
1269 | ENTRY(paranoid_entry) | |
8c1f7558 | 1270 | UNWIND_HINT_FUNC |
1eeb207f DV |
1271 | cld |
1272 | SAVE_C_REGS 8 | |
1273 | SAVE_EXTRA_REGS 8 | |
946c1911 | 1274 | ENCODE_FRAME_POINTER 8 |
4d732138 IM |
1275 | movl $1, %ebx |
1276 | movl $MSR_GS_BASE, %ecx | |
1eeb207f | 1277 | rdmsr |
4d732138 IM |
1278 | testl %edx, %edx |
1279 | js 1f /* negative -> in kernel */ | |
1eeb207f | 1280 | SWAPGS |
4d732138 | 1281 | xorl %ebx, %ebx |
8a09317b DH |
1282 | |
1283 | 1: | |
1284 | SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 | |
1285 | ||
1286 | ret | |
ebfc453e | 1287 | END(paranoid_entry) |
ddeb8f21 | 1288 | |
ebfc453e DV |
1289 | /* |
1290 | * "Paranoid" exit path from exception stack. This is invoked | |
1291 | * only on return from non-NMI IST interrupts that came | |
1292 | * from kernel space. | |
1293 | * | |
1294 | * We may be returning to very strange contexts (e.g. very early | |
1295 | * in syscall entry), so checking for preemption here would | |
1296 | * be complicated. Fortunately, there's no good reason | |
1297 | * to try to handle preemption here. | |
4d732138 IM |
1298 | * |
1299 | * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) | |
ebfc453e | 1300 | */ |
ddeb8f21 | 1301 | ENTRY(paranoid_exit) |
8c1f7558 | 1302 | UNWIND_HINT_REGS |
2140a994 | 1303 | DISABLE_INTERRUPTS(CLBR_ANY) |
5963e317 | 1304 | TRACE_IRQS_OFF_DEBUG |
4d732138 | 1305 | testl %ebx, %ebx /* swapgs needed? */ |
e5317832 | 1306 | jnz .Lparanoid_exit_no_swapgs |
f2db9382 | 1307 | TRACE_IRQS_IRETQ |
21e94459 | 1308 | RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 |
ddeb8f21 | 1309 | SWAPGS_UNSAFE_STACK |
e5317832 AL |
1310 | jmp .Lparanoid_exit_restore |
1311 | .Lparanoid_exit_no_swapgs: | |
f2db9382 | 1312 | TRACE_IRQS_IRETQ_DEBUG |
e5317832 AL |
1313 | .Lparanoid_exit_restore: |
1314 | jmp restore_regs_and_return_to_kernel | |
ddeb8f21 AH |
1315 | END(paranoid_exit) |
1316 | ||
1317 | /* | |
ebfc453e | 1318 | * Save all registers in pt_regs, and switch gs if needed. |
539f5113 | 1319 | * Return: EBX=0: came from user mode; EBX=1: otherwise |
ddeb8f21 AH |
1320 | */ |
1321 | ENTRY(error_entry) | |
8c1f7558 | 1322 | UNWIND_HINT_FUNC |
ddeb8f21 | 1323 | cld |
76f5df43 DV |
1324 | SAVE_C_REGS 8 |
1325 | SAVE_EXTRA_REGS 8 | |
946c1911 | 1326 | ENCODE_FRAME_POINTER 8 |
4d732138 | 1327 | xorl %ebx, %ebx |
03335e95 | 1328 | testb $3, CS+8(%rsp) |
cb6f64ed | 1329 | jz .Lerror_kernelspace |
539f5113 | 1330 | |
cb6f64ed AL |
1331 | /* |
1332 | * We entered from user mode or we're pretending to have entered | |
1333 | * from user mode due to an IRET fault. | |
1334 | */ | |
ddeb8f21 | 1335 | SWAPGS |
8a09317b DH |
1336 | /* We have user CR3. Change to kernel CR3. */ |
1337 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax | |
539f5113 | 1338 | |
cb6f64ed | 1339 | .Lerror_entry_from_usermode_after_swapgs: |
7f2590a1 AL |
1340 | /* Put us onto the real thread stack. */ |
1341 | popq %r12 /* save return addr in %r12 */ | |
1342 | movq %rsp, %rdi /* arg0 = pt_regs pointer */ | |
1343 | call sync_regs | |
1344 | movq %rax, %rsp /* switch stack */ | |
1345 | ENCODE_FRAME_POINTER | |
1346 | pushq %r12 | |
1347 | ||
f1075053 AL |
1348 | /* |
1349 | * We need to tell lockdep that IRQs are off. We can't do this until | |
1350 | * we fix gsbase, and we should do it before enter_from_user_mode | |
1351 | * (which can take locks). | |
1352 | */ | |
1353 | TRACE_IRQS_OFF | |
478dc89c | 1354 | CALL_enter_from_user_mode |
f1075053 | 1355 | ret |
02bc7768 | 1356 | |
cb6f64ed | 1357 | .Lerror_entry_done: |
ddeb8f21 AH |
1358 | TRACE_IRQS_OFF |
1359 | ret | |
ddeb8f21 | 1360 | |
ebfc453e DV |
1361 | /* |
1362 | * There are two places in the kernel that can potentially fault with | |
1363 | * usergs. Handle them here. B stepping K8s sometimes report a | |
1364 | * truncated RIP for IRET exceptions returning to compat mode. Check | |
1365 | * for these here too. | |
1366 | */ | |
cb6f64ed | 1367 | .Lerror_kernelspace: |
4d732138 IM |
1368 | incl %ebx |
1369 | leaq native_irq_return_iret(%rip), %rcx | |
1370 | cmpq %rcx, RIP+8(%rsp) | |
cb6f64ed | 1371 | je .Lerror_bad_iret |
4d732138 IM |
1372 | movl %ecx, %eax /* zero extend */ |
1373 | cmpq %rax, RIP+8(%rsp) | |
cb6f64ed | 1374 | je .Lbstep_iret |
42c748bb | 1375 | cmpq $.Lgs_change, RIP+8(%rsp) |
cb6f64ed | 1376 | jne .Lerror_entry_done |
539f5113 AL |
1377 | |
1378 | /* | |
42c748bb | 1379 | * hack: .Lgs_change can fail with user gsbase. If this happens, fix up |
539f5113 | 1380 | * gsbase and proceed. We'll fix up the exception and land in |
42c748bb | 1381 | * .Lgs_change's error handler with kernel gsbase. |
539f5113 | 1382 | */ |
2fa5f04f | 1383 | SWAPGS |
8a09317b | 1384 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
2fa5f04f | 1385 | jmp .Lerror_entry_done |
ae24ffe5 | 1386 | |
cb6f64ed | 1387 | .Lbstep_iret: |
ae24ffe5 | 1388 | /* Fix truncated RIP */ |
4d732138 | 1389 | movq %rcx, RIP+8(%rsp) |
b645af2d AL |
1390 | /* fall through */ |
1391 | ||
cb6f64ed | 1392 | .Lerror_bad_iret: |
539f5113 | 1393 | /* |
8a09317b DH |
1394 | * We came from an IRET to user mode, so we have user |
1395 | * gsbase and CR3. Switch to kernel gsbase and CR3: | |
539f5113 | 1396 | */ |
b645af2d | 1397 | SWAPGS |
8a09317b | 1398 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
539f5113 AL |
1399 | |
1400 | /* | |
1401 | * Pretend that the exception came from user mode: set up pt_regs | |
1402 | * as if we faulted immediately after IRET and clear EBX so that | |
1403 | * error_exit knows that we will be returning to user mode. | |
1404 | */ | |
4d732138 IM |
1405 | mov %rsp, %rdi |
1406 | call fixup_bad_iret | |
1407 | mov %rax, %rsp | |
539f5113 | 1408 | decl %ebx |
cb6f64ed | 1409 | jmp .Lerror_entry_from_usermode_after_swapgs |
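/*
 * Editor's note, added for clarity: fixup_bad_iret() is the C helper in
 * arch/x86/kernel/traps.c; it copies the frame left by the failed IRET onto
 * the current stack so that it looks like a fault taken directly from user
 * mode and returns the adjusted stack pointer, which becomes the new %rsp
 * above.
 */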
ddeb8f21 AH |
1410 | END(error_entry) |
1411 | ||
1412 | ||
539f5113 | 1413 | /* |
75ca5b22 | 1414 | * On entry, EBX is a "return to kernel mode" flag: |
539f5113 AL |
1415 | * 1: already in kernel mode, don't need SWAPGS |
1416 | * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode | |
1417 | */ | |
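/*
 * Editor's sketch (illustrative only) of the dispatch below:
 *
 *	if (ebx)		// came from kernel mode
 *		goto retint_kernel;
 *	else			// came from, or is pretending to be, user mode
 *		goto retint_user;
 */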
ddeb8f21 | 1418 | ENTRY(error_exit) |
8c1f7558 | 1419 | UNWIND_HINT_REGS |
2140a994 | 1420 | DISABLE_INTERRUPTS(CLBR_ANY) |
ddeb8f21 | 1421 | TRACE_IRQS_OFF |
2140a994 | 1422 | testl %ebx, %ebx |
4d732138 IM |
1423 | jnz retint_kernel |
1424 | jmp retint_user | |
ddeb8f21 AH |
1425 | END(error_exit) |
1426 | ||
929bacec AL |
1427 | /* |
1428 | * Runs on exception stack. Xen PV does not go through this path at all, | |
1429 | * so we can use real assembly here. | |
8a09317b DH |
1430 | * |
1431 | * Registers: | |
1432 | * %r14: Used to save/restore the CR3 of the interrupted context | |
1433 | * when PAGE_TABLE_ISOLATION is in use. Do not clobber. | |
929bacec | 1434 | */ |
ddeb8f21 | 1435 | ENTRY(nmi) |
8c1f7558 | 1436 | UNWIND_HINT_IRET_REGS |
929bacec | 1437 | |
3f3c8b8c SR |
1438 | /* |
1439 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | |
1440 | * the iretq it performs will take us out of NMI context. | |
1441 | * This means that we can have nested NMIs where the next | |
1442 | * NMI is using the top of the stack of the previous NMI. We | |
1443 | * can't let it execute because the nested NMI will corrupt the | |
1444 | * stack of the previous NMI. NMI handlers are not re-entrant | |
1445 | * anyway. | |
1446 | * | |
1447 | * To handle this case we do the following: | |
1448 | * Check a special location on the stack that contains
1449 | * a variable that is set when NMIs are executing. | |
1450 | * The interrupted task's stack is also checked to see if it | |
1451 | * is an NMI stack. | |
1452 | * If the variable is not set and the stack is not the NMI | |
1453 | * stack then: | |
1454 | * o Set the special variable on the stack | |
0b22930e AL |
1455 | * o Copy the interrupt frame into an "outermost" location on the |
1456 | * stack | |
1457 | * o Copy the interrupt frame into an "iret" location on the stack | |
3f3c8b8c SR |
1458 | * o Continue processing the NMI |
1459 | * If the variable is set or the previous stack is the NMI stack: | |
0b22930e | 1460 | * o Modify the "iret" location to jump to the repeat_nmi |
3f3c8b8c SR |
1461 | * o return back to the first NMI |
1462 | * | |
1463 | * Now on exit of the first NMI, we first clear the stack variable.
1464 | * The NMI stack will tell any nested NMIs at that point that it is | |
1465 | * nested. Then we pop the stack normally with iret, and if there was | |
1466 | * a nested NMI that updated the copy interrupt stack frame, a | |
1467 | * jump will be made to the repeat_nmi code that will handle the second | |
1468 | * NMI. | |
9b6e6a83 AL |
1469 | * |
1470 | * However, espfix prevents us from directly returning to userspace | |
1471 | * with a single IRET instruction. Similarly, IRET to user mode | |
1472 | * can fault. We therefore handle NMIs from user space like | |
1473 | * other IST entries. | |
3f3c8b8c SR |
1474 | */ |
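/*
 * Editor's sketch, not part of the original source: the decision tree
 * described above, compressed into C-like pseudocode (rip/rsp/eflags are
 * the interrupted context's values read from the hardware frame):
 *
 *	if (rip >= repeat_nmi && rip < end_repeat_nmi)
 *		return;			// outer NMI is about to repeat anyway
 *	if (nmi_executing_variable ||
 *	    (rsp_inside_this_nmi_stack && (eflags & X86_EFLAGS_DF)))
 *		goto nested_nmi;	// redirect the "iret" frame to repeat_nmi
 *	goto first_nmi;			// build the frames, then call do_nmi()
 */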
1475 | ||
e93c1730 AL |
1476 | ASM_CLAC |
1477 | ||
146b2b09 | 1478 | /* Use %rdx as our temp variable throughout */ |
4d732138 | 1479 | pushq %rdx |
3f3c8b8c | 1480 | |
9b6e6a83 AL |
1481 | testb $3, CS-RIP+8(%rsp) |
1482 | jz .Lnmi_from_kernel | |
1483 | ||
1484 | /* | |
1485 | * NMI from user mode. We need to run on the thread stack, but we | |
1486 | * can't go through the normal entry paths: NMIs are masked, and | |
1487 | * we don't want to enable interrupts, because then we'll end | |
1488 | * up in an awkward situation in which IRQs are on but NMIs | |
1489 | * are off. | |
83c133cf AL |
1490 | * |
1491 | * We also must not push anything to the stack before switching | |
1492 | * stacks lest we corrupt the "NMI executing" variable. | |
9b6e6a83 AL |
1493 | */ |
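/*
 * Editor's note, added for clarity: the code below stashes the entry-stack
 * pointer in %rdx before switching %rsp to the thread stack, so 0(%rdx)
 * holds the %rdx value saved on entry and 1*8(%rdx)..5*8(%rdx) hold the
 * hardware RIP, CS, RFLAGS, RSP and SS. The pushes replay that frame onto
 * the thread stack and then build the rest of pt_regs, from orig_ax down
 * to r15.
 */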
1494 | ||
929bacec | 1495 | swapgs |
9b6e6a83 | 1496 | cld |
8a09317b | 1497 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx |
9b6e6a83 AL |
1498 | movq %rsp, %rdx |
1499 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | |
8c1f7558 | 1500 | UNWIND_HINT_IRET_REGS base=%rdx offset=8 |
9b6e6a83 AL |
1501 | pushq 5*8(%rdx) /* pt_regs->ss */ |
1502 | pushq 4*8(%rdx) /* pt_regs->rsp */ | |
1503 | pushq 3*8(%rdx) /* pt_regs->flags */ | |
1504 | pushq 2*8(%rdx) /* pt_regs->cs */ | |
1505 | pushq 1*8(%rdx) /* pt_regs->rip */ | |
8c1f7558 | 1506 | UNWIND_HINT_IRET_REGS |
9b6e6a83 AL |
1507 | pushq $-1 /* pt_regs->orig_ax */ |
1508 | pushq %rdi /* pt_regs->di */ | |
1509 | pushq %rsi /* pt_regs->si */ | |
1510 | pushq (%rdx) /* pt_regs->dx */ | |
1511 | pushq %rcx /* pt_regs->cx */ | |
1512 | pushq %rax /* pt_regs->ax */ | |
1513 | pushq %r8 /* pt_regs->r8 */ | |
1514 | pushq %r9 /* pt_regs->r9 */ | |
1515 | pushq %r10 /* pt_regs->r10 */ | |
1516 | pushq %r11 /* pt_regs->r11 */ | |
1517 | pushq %rbx /* pt_regs->rbx */ | |
1518 | pushq %rbp /* pt_regs->rbp */ | |
1519 | pushq %r12 /* pt_regs->r12 */ | |
1520 | pushq %r13 /* pt_regs->r13 */ | |
1521 | pushq %r14 /* pt_regs->r14 */ | |
1522 | pushq %r15 /* pt_regs->r15 */ | |
8c1f7558 | 1523 | UNWIND_HINT_REGS |
946c1911 | 1524 | ENCODE_FRAME_POINTER |
9b6e6a83 AL |
1525 | |
1526 | /* | |
1527 | * At this point we no longer need to worry about stack damage | |
1528 | * due to nesting -- we're on the normal thread stack and we're | |
1529 | * done with the NMI stack. | |
1530 | */ | |
1531 | ||
1532 | movq %rsp, %rdi | |
1533 | movq $-1, %rsi | |
1534 | call do_nmi | |
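/*
 * Editor's note: do_nmi() is the C handler (arch/x86/kernel/nmi.c in this
 * kernel); the two moves above pass %rdi = pt_regs pointer and %rsi = -1 as
 * the error code, matching its (struct pt_regs *, long) arguments.
 */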
1535 | ||
45d5a168 | 1536 | /* |
9b6e6a83 | 1537 | * Return back to user mode. We must *not* do the normal exit |
946c1911 | 1538 | * work, because we don't want to enable interrupts. |
45d5a168 | 1539 | */ |
8a055d7f | 1540 | jmp swapgs_restore_regs_and_return_to_usermode |
45d5a168 | 1541 | |
9b6e6a83 | 1542 | .Lnmi_from_kernel: |
3f3c8b8c | 1543 | /* |
0b22930e AL |
1544 | * Here's what our stack frame will look like: |
1545 | * +---------------------------------------------------------+ | |
1546 | * | original SS | | |
1547 | * | original Return RSP | | |
1548 | * | original RFLAGS | | |
1549 | * | original CS | | |
1550 | * | original RIP | | |
1551 | * +---------------------------------------------------------+ | |
1552 | * | temp storage for rdx | | |
1553 | * +---------------------------------------------------------+ | |
1554 | * | "NMI executing" variable | | |
1555 | * +---------------------------------------------------------+ | |
1556 | * | iret SS } Copied from "outermost" frame | | |
1557 | * | iret Return RSP } on each loop iteration; overwritten | | |
1558 | * | iret RFLAGS } by a nested NMI to force another | | |
1559 | * | iret CS } iteration if needed. | | |
1560 | * | iret RIP } | | |
1561 | * +---------------------------------------------------------+ | |
1562 | * | outermost SS } initialized in first_nmi; | | |
1563 | * | outermost Return RSP } will not be changed before | | |
1564 | * | outermost RFLAGS } NMI processing is done. | | |
1565 | * | outermost CS } Copied to "iret" frame on each | | |
1566 | * | outermost RIP } iteration. | | |
1567 | * +---------------------------------------------------------+ | |
1568 | * | pt_regs | | |
1569 | * +---------------------------------------------------------+ | |
1570 | * | |
1571 | * The "original" frame is used by hardware. Before re-enabling | |
1572 | * NMIs, we need to be done with it, and we need to leave enough | |
1573 | * space for the asm code here. | |
1574 | * | |
1575 | * We return by executing IRET while RSP points to the "iret" frame. | |
1576 | * That will either return for real or it will loop back into NMI | |
1577 | * processing. | |
1578 | * | |
1579 | * The "outermost" frame is copied to the "iret" frame on each | |
1580 | * iteration of the loop, so each iteration starts with the "iret" | |
1581 | * frame pointing to the final return target. | |
1582 | */ | |
1583 | ||
45d5a168 | 1584 | /* |
0b22930e AL |
1585 | * Determine whether we're a nested NMI. |
1586 | * | |
a27507ca AL |
1587 | * If we interrupted kernel code between repeat_nmi and |
1588 | * end_repeat_nmi, then we are a nested NMI. We must not | |
1589 | * modify the "iret" frame because it's being written by | |
1590 | * the outer NMI. That's okay; the outer NMI handler is | |
1591 | * about to call do_nmi anyway, so we can just
1592 | * resume the outer NMI. | |
45d5a168 | 1593 | */ |
a27507ca AL |
1594 | |
1595 | movq $repeat_nmi, %rdx | |
1596 | cmpq 8(%rsp), %rdx | |
1597 | ja 1f | |
1598 | movq $end_repeat_nmi, %rdx | |
1599 | cmpq 8(%rsp), %rdx | |
1600 | ja nested_nmi_out | |
1601 | 1: | |
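/*
 * Editor's sketch (not in the source): the two compares above implement
 *
 *	if (rip >= repeat_nmi && rip < end_repeat_nmi)
 *		goto nested_nmi_out;	// resume the outer NMI untouched
 *
 * where rip is the interrupted RIP at 8(%rsp).
 */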
45d5a168 | 1602 | |
3f3c8b8c | 1603 | /* |
a27507ca | 1604 | * Now check "NMI executing". If it's set, then we're nested. |
0b22930e AL |
1605 | * This will not detect if we interrupted an outer NMI just |
1606 | * before IRET. | |
3f3c8b8c | 1607 | */ |
4d732138 IM |
1608 | cmpl $1, -8(%rsp) |
1609 | je nested_nmi | |
3f3c8b8c SR |
1610 | |
1611 | /* | |
0b22930e AL |
1612 | * Now test if the previous stack was an NMI stack. This covers |
1613 | * the case where we interrupt an outer NMI after it clears | |
810bc075 AL |
1614 | * "NMI executing" but before IRET. We need to be careful, though: |
1615 | * there is one case in which RSP could point to the NMI stack | |
1616 | * despite there being no NMI active: naughty userspace controls | |
1617 | * RSP at the very beginning of the SYSCALL targets. We can | |
1618 | * pull a fast one on naughty userspace, though: we program | |
1619 | * SYSCALL to mask DF, so userspace cannot cause DF to be set | |
1620 | * if it controls the kernel's RSP. We set DF before we clear | |
1621 | * "NMI executing". | |
3f3c8b8c | 1622 | */ |
0784b364 DV |
1623 | lea 6*8(%rsp), %rdx |
1624 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ | |
1625 | cmpq %rdx, 4*8(%rsp) | |
1626 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ | |
1627 | ja first_nmi | |
4d732138 | 1628 | |
0784b364 DV |
1629 | subq $EXCEPTION_STKSZ, %rdx |
1630 | cmpq %rdx, 4*8(%rsp) | |
1631 | /* If it is below the NMI stack, it is a normal NMI */ | |
1632 | jb first_nmi | |
810bc075 AL |
1633 | |
1634 | /* Ah, it is within the NMI stack. */ | |
1635 | ||
1636 | testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) | |
1637 | jz first_nmi /* RSP was user controlled. */ | |
1638 | ||
1639 | /* This is a nested NMI. */ | |
0784b364 | 1640 | |
3f3c8b8c SR |
1641 | nested_nmi: |
1642 | /* | |
0b22930e AL |
1643 | * Modify the "iret" frame to point to repeat_nmi, forcing another |
1644 | * iteration of NMI handling. | |
3f3c8b8c | 1645 | */ |
23a781e9 | 1646 | subq $8, %rsp |
4d732138 IM |
1647 | leaq -10*8(%rsp), %rdx |
1648 | pushq $__KERNEL_DS | |
1649 | pushq %rdx | |
131484c8 | 1650 | pushfq |
4d732138 IM |
1651 | pushq $__KERNEL_CS |
1652 | pushq $repeat_nmi | |
3f3c8b8c SR |
1653 | |
1654 | /* Put stack back */ | |
4d732138 | 1655 | addq $(6*8), %rsp |
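/*
 * Editor's note, derived from the offsets above (illustrative): a nested
 * NMI always starts at the fixed top of the NMI IST stack, so the subq $8
 * plus the five pushes land exactly on the outer NMI's "iret" frame slots,
 * and -10*8(%rsp) is the outer NMI's post-setup %rsp, i.e. its "outermost
 * RIP" slot. The rewritten frame therefore IRETs into repeat_nmi with the
 * outer NMI's stack pointer.
 */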
3f3c8b8c SR |
1656 | |
1657 | nested_nmi_out: | |
4d732138 | 1658 | popq %rdx |
3f3c8b8c | 1659 | |
0b22930e | 1660 | /* We are returning to kernel mode, so this cannot result in a fault. */ |
929bacec | 1661 | iretq |
3f3c8b8c SR |
1662 | |
1663 | first_nmi: | |
0b22930e | 1664 | /* Restore rdx. */ |
4d732138 | 1665 | movq (%rsp), %rdx |
62610913 | 1666 | |
36f1a77b AL |
1667 | /* Make room for "NMI executing". */ |
1668 | pushq $0 | |
3f3c8b8c | 1669 | |
0b22930e | 1670 | /* Leave room for the "iret" frame */ |
4d732138 | 1671 | subq $(5*8), %rsp |
28696f43 | 1672 | |
0b22930e | 1673 | /* Copy the "original" frame to the "outermost" frame */ |
3f3c8b8c | 1674 | .rept 5 |
4d732138 | 1675 | pushq 11*8(%rsp) |
3f3c8b8c | 1676 | .endr |
8c1f7558 | 1677 | UNWIND_HINT_IRET_REGS |
62610913 | 1678 | |
79fb4ad6 SR |
1679 | /* Everything up to here is safe from nested NMIs */ |
1680 | ||
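/*
 * Editor's note, word offsets derived from the code above (for reference):
 * with %rsp now at "outermost RIP", the layout is
 *
 *	 0*8.. 4*8(%rsp)  outermost RIP, CS, RFLAGS, RSP, SS
 *	 5*8.. 9*8(%rsp)  "iret" RIP, CS, RFLAGS, RSP, SS
 *	10*8(%rsp)        "NMI executing" variable
 *	11*8(%rsp)        temp storage for rdx
 *	12*8..16*8(%rsp)  original RIP, CS, RFLAGS, RSP, SS
 *
 * which is why repeat_nmi sets 10*8(%rsp) and why, after pt_regs is popped
 * and six words are skipped on exit, 5*8(%rsp) names the same variable.
 */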
a97439aa AL |
1681 | #ifdef CONFIG_DEBUG_ENTRY |
1682 | /* | |
1683 | * For ease of testing, unmask NMIs right away. Disabled by | |
1684 | * default because IRET is very expensive. | |
1685 | */ | |
1686 | pushq $0 /* SS */ | |
1687 | pushq %rsp /* RSP (minus 8 because of the previous push) */ | |
1688 | addq $8, (%rsp) /* Fix up RSP */ | |
1689 | pushfq /* RFLAGS */ | |
1690 | pushq $__KERNEL_CS /* CS */ | |
1691 | pushq $1f /* RIP */ | |
929bacec | 1692 | iretq /* continues at repeat_nmi below */ |
8c1f7558 | 1693 | UNWIND_HINT_IRET_REGS |
a97439aa AL |
1694 | 1: |
1695 | #endif | |
1696 | ||
0b22930e | 1697 | repeat_nmi: |
62610913 JB |
1698 | /* |
1699 | * If there was a nested NMI, the first NMI's iret will return | |
1700 | * here. But NMIs are still enabled and we can take another | |
1701 | * nested NMI. The nested NMI checks the interrupted RIP to see | |
1702 | * if it is between repeat_nmi and end_repeat_nmi, and if so | |
1703 | * it will just return, as we are about to repeat an NMI anyway. | |
1704 | * This makes it safe to copy to the stack frame that a nested | |
1705 | * NMI will update. | |
0b22930e AL |
1706 | * |
1707 | * RSP is pointing to "outermost RIP". gsbase is unknown, but, if | |
1708 | * we're repeating an NMI, gsbase has the same value that it had on | |
1709 | * the first iteration. paranoid_entry will load the kernel | |
36f1a77b AL |
1710 | * gsbase if needed before we call do_nmi. "NMI executing" |
1711 | * is zero. | |
62610913 | 1712 | */ |
36f1a77b | 1713 | movq $1, 10*8(%rsp) /* Set "NMI executing". */ |
3f3c8b8c | 1714 | |
62610913 | 1715 | /* |
0b22930e AL |
1716 | * Copy the "outermost" frame to the "iret" frame. NMIs that nest |
1717 | * here must not modify the "iret" frame while we're writing to | |
1718 | * it or it will end up containing garbage. | |
62610913 | 1719 | */ |
4d732138 | 1720 | addq $(10*8), %rsp |
3f3c8b8c | 1721 | .rept 5 |
4d732138 | 1722 | pushq -6*8(%rsp) |
3f3c8b8c | 1723 | .endr |
4d732138 | 1724 | subq $(5*8), %rsp |
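/*
 * Editor's note, added for clarity: after the addq, %rsp sits on the
 * "NMI executing" slot, so -6*8(%rsp) initially names "outermost SS"; each
 * pushq moves source and destination down one word, copying SS, RSP,
 * RFLAGS, CS and RIP into the "iret" slots, and the final subq leaves %rsp
 * back at "outermost RIP".
 */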
62610913 | 1725 | end_repeat_nmi: |
3f3c8b8c SR |
1726 | |
1727 | /* | |
0b22930e AL |
1728 | * Everything below this point can be preempted by a nested NMI. |
1729 | * If this happens, then the inner NMI will change the "iret" | |
1730 | * frame to point back to repeat_nmi. | |
3f3c8b8c | 1731 | */ |
4d732138 | 1732 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
76f5df43 DV |
1733 | ALLOC_PT_GPREGS_ON_STACK |
1734 | ||
1fd466ef | 1735 | /* |
ebfc453e | 1736 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
1fd466ef SR |
1737 | * as we should not be calling schedule in NMI context,
1738 | * even with normal interrupts enabled. An NMI should not be
1739 | * setting NEED_RESCHED or anything that normal interrupts and | |
1740 | * exceptions might do. | |
1741 | */ | |
4d732138 | 1742 | call paranoid_entry |
8c1f7558 | 1743 | UNWIND_HINT_REGS |
7fbb98c5 | 1744 | |
ddeb8f21 | 1745 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
4d732138 IM |
1746 | movq %rsp, %rdi |
1747 | movq $-1, %rsi | |
1748 | call do_nmi | |
7fbb98c5 | 1749 | |
21e94459 | 1750 | RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 |
8a09317b | 1751 | |
4d732138 IM |
1752 | testl %ebx, %ebx /* swapgs needed? */ |
1753 | jnz nmi_restore | |
ddeb8f21 AH |
1754 | nmi_swapgs: |
1755 | SWAPGS_UNSAFE_STACK | |
1756 | nmi_restore: | |
471ee483 AL |
1757 | POP_EXTRA_REGS |
1758 | POP_C_REGS | |
0b22930e | 1759 | |
471ee483 AL |
1760 | /* |
1761 | * Skip orig_ax and the "outermost" frame to point RSP at the
1762 | * "iret" frame.
1763 | */ | |
1764 | addq $6*8, %rsp | |
28696f43 | 1765 | |
810bc075 AL |
1766 | /* |
1767 | * Clear "NMI executing". Set DF first so that we can easily | |
1768 | * distinguish the remaining code between here and IRET from | |
929bacec AL |
1769 | * the SYSCALL entry and exit paths. |
1770 | * | |
1771 | * We arguably should just inspect RIP instead, but I (Andy) wrote | |
1772 | * this code when I had the misapprehension that Xen PV supported | |
1773 | * NMIs, and Xen PV would break that approach. | |
810bc075 AL |
1774 | */ |
1775 | std | |
1776 | movq $0, 5*8(%rsp) /* clear "NMI executing" */ | |
0b22930e AL |
1777 | |
1778 | /* | |
929bacec AL |
1779 | * iretq reads the "iret" frame and exits the NMI stack in a |
1780 | * single instruction. We are returning to kernel mode, so this | |
1781 | * cannot result in a fault. Similarly, we don't need to worry | |
1782 | * about espfix64 on the way back to kernel mode. | |
0b22930e | 1783 | */ |
929bacec | 1784 | iretq |
ddeb8f21 AH |
1785 | END(nmi) |
1786 | ||
1787 | ENTRY(ignore_sysret) | |
8c1f7558 | 1788 | UNWIND_HINT_EMPTY |
4d732138 | 1789 | mov $-ENOSYS, %eax |
ddeb8f21 | 1790 | sysret |
ddeb8f21 | 1791 | END(ignore_sysret) |
2deb4be2 AL |
1792 | |
1793 | ENTRY(rewind_stack_do_exit) | |
8c1f7558 | 1794 | UNWIND_HINT_FUNC |
2deb4be2 AL |
1795 | /* Prevent any naive code from trying to unwind to our caller. */ |
1796 | xorl %ebp, %ebp | |
1797 | ||
1798 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rax | |
8c1f7558 JP |
1799 | leaq -PTREGS_SIZE(%rax), %rsp |
1800 | UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE | |
2deb4be2 AL |
1801 | |
1802 | call do_exit | |
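/*
 * Editor's sketch (C-like pseudocode, not the real implementation) of the
 * routine above; the helper name is illustrative:
 *
 *	void rewind_stack_do_exit(int exit_code)	// exit_code stays in %rdi
 *	{
 *		rbp = 0;				// stop naive unwinders
 *		rsp = current_top_of_stack() - PTREGS_SIZE;	// fresh stack
 *		do_exit(exit_code);			// never returns
 *	}
 */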
2deb4be2 | 1803 | END(rewind_stack_do_exit) |