/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
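/*
 * Illustration (not part of the kernel): a six-argument system call
 * issued from user space under this convention, e.g. mmap(2).  The
 * concrete numbers are only an example of the register mapping above:
 *
 *	movl	$9, %eax		# __NR_mmap
 *	xorl	%edi, %edi		# addr   = NULL
 *	movl	$4096, %esi		# length
 *	movl	$3, %edx		# prot   = PROT_READ|PROT_WRITE
 *	movl	$0x22, %r10d		# flags  = MAP_PRIVATE|MAP_ANONYMOUS
 *	movq	$-1, %r8		# fd
 *	xorl	%r9d, %r9d		# offset
 *	syscall				# rcx/r11 clobbered by the CPU
 */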

.pushsection .entry_trampoline, "ax"

/*
 * The code in here gets remapped into cpu_entry_area's trampoline.  This means
 * that the assembler and linker have the wrong idea as to where this code
 * lives (and, in fact, it's mapped more than once, so it's not even at a
 * fixed address).  So we can't reference any symbols outside the entry
 * trampoline and expect it to work.
 *
 * Instead, we carefully abuse %rip-relative addressing.
 * _entry_trampoline(%rip) refers to the start of the (remapped) entry
 * trampoline.  We can thus find cpu_entry_area with this macro:
 */

#define CPU_ENTRY_AREA \
	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
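/*
 * Sketch of the address math (illustrative): the remapped label
 * _entry_trampoline lives at cpu_entry_area + CPU_ENTRY_AREA_entry_trampoline,
 * so subtracting that offset from the %rip-relative label yields the base
 * of the current CPU's cpu_entry_area, no matter where this particular
 * copy of the code happens to be mapped.
 */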

/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
#define RSP_SCRATCH	CPU_ENTRY_AREA_entry_stack + \
			SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA

ENTRY(entry_SYSCALL_64_trampoline)
	UNWIND_HINT_EMPTY
	swapgs

	/* Stash the user RSP. */
	movq	%rsp, RSP_SCRATCH

	/* Note: using %rsp as a scratch reg. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp

	/* Load the top of the task stack into RSP */
	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp

	/* Start building the simulated IRET frame. */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	RSP_SCRATCH			/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */

	/*
	 * x86 lacks a near absolute jump, and we can't jump to the real
	 * entry text with a relative jump.  We could push the target
	 * address and then use retq, but this destroys the pipeline on
	 * many CPUs (wasting over 20 cycles on Sandy Bridge).  Instead,
	 * spill RDI and restore it in a second-stage trampoline.
	 */
	pushq	%rdi
	movq	$entry_SYSCALL_64_stage2, %rdi
	jmp	*%rdi
END(entry_SYSCALL_64_trampoline)

.popsection

ENTRY(entry_SYSCALL_64_stage2)
	UNWIND_HINT_EMPTY
	popq	%rdi
	jmp	entry_SYSCALL_64_after_hwframe
END(entry_SYSCALL_64_stage2)

ENTRY(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/*
	 * This path is only taken when PAGE_TABLE_ISOLATION is disabled so it
	 * is not required to switch CR3.
	 */
	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
GLOBAL(entry_SYSCALL_64_after_hwframe)
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
	UNWIND_HINT_REGS extra=0

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall.  If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	addq	$6*8, %rsp			/* skip extra regs -- they were preserved */
	UNWIND_HINT_EMPTY
	jmp	.Lpop_c_regs_except_rcx_r11_and_sysret

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path.  Calling
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11			/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
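	/*
	 * Worked example (illustrative, 4-level paging where
	 * __VIRTUAL_MASK_SHIFT == 47, i.e. a shift count of 16):
	 * a canonical address such as 0x00007fffffffe000 survives the
	 * shl+sar pair unchanged, while 0x0000800000000000 becomes
	 * 0xffff800000000000, so the cmpq below catches it.
	 */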
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	UNWIND_HINT_EMPTY
	POP_EXTRA_REGS
.Lpop_c_regs_except_rcx_r11_and_sysret:
	popq	%rsi	/* skip r11 */
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rsi	/* skip rcx */
	popq	%rdx
	popq	%rsi

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path.  If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	popq	%rax
	UNWIND_HINT_REGS extra=0
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	UNWIND_HINT_FUNC
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>
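/*
 * Illustration (hypothetical entry, not reproduced from the generated
 * header): a line in <asm/syscalls_64.h> of the form
 *
 *	__SYSCALL_64(57, sys_fork, ptregs)
 *
 * would expand via __SYSCALL_64_QUAL_ptregs to "ptregs_stub sys_fork",
 * emitting a ptregs_sys_fork stub for the syscall table to point at
 * instead of sys_fork itself.
 */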

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	UNWIND_HINT_FUNC
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	movq	%r12, %rdi
	call	*%rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
END(irq_entries_start)
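/*
 * Worked example (illustrative): for vector 0x21, ~0x21 + 0x80 = 0x5e,
 * which fits in a signed byte, so the pushq is a short two-byte encoding
 * and each stub stays inside its 8-byte slot.  common_interrupt below
 * undoes the bias with addq $-0x80, leaving ~vector (-34 here) in
 * orig_ax, from which do_IRQ recovers the vector number.
 */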

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
 * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
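/*
 * Usage sketch (illustrative; it mirrors the "interrupt" macro further
 * down, with "handler" standing in for the real C handler):
 *
 *	ENTER_IRQ_STACK old_rsp=%rdi
 *	call	handler			# runs on the per-cpu IRQ stack
 *	LEAVE_IRQ_STACK
 */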
.macro ENTER_IRQ_STACK regs=1 old_rsp
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	movq	%rsp, \old_rsp

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage.  This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack.  For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(irq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here.  Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
	 * the irq stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld

	testb	$3, CS-ORIG_RAX(%rsp)
	jz	1f
	SWAPGS
	call	switch_to_thread_stack
1:

	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func	/* rdi points to pt_regs */
	.endm

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rax
	popq	%rcx
	popq	%rdx
	popq	%rsi

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

GLOBAL(restore_regs_and_return_to_kernel)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_EXTRA_REGS
	POP_C_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	INTERRUPT_RETURN

ENTRY(native_iret)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
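	/*
	 * Example (illustrative): if the user RSP were 0x0012abcd, the
	 * andl above leaves RAX == 0x00120000, and the orq below yields
	 * espfix_stack | 0x00120000 -- one of the 65536 RO aliases -- so
	 * RSP[31:16] carries the user's bits while still addressing the
	 * ESPFIX stack page.
	 */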
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	SWAPGS					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
#define POP_SECTION_IRQENTRY	.popsection

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
apicinterrupt3 POSTED_INTR_NESTED_VECTOR	kvm_posted_intr_nested_ipi	smp_kvm_posted_intr_nested_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)

/*
 * Switch to the thread stack.  This is called with the IRET frame and
 * orig_ax on the stack.  (That is, RDI..R12 are not on the stack and
 * space has not been allocated for them.)
 */
ENTRY(switch_to_thread_stack)
	UNWIND_HINT_FUNC

	pushq	%rdi
	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
	ret
END(switch_to_thread_stack)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8

	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid < 2
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	.Lfrom_usermode_switch_stack_\@
	.endif

	.if \paranoid
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	UNWIND_HINT_REGS
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid < 2
	/*
	 * Entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
.Lfrom_usermode_switch_stack_\@:
	call	error_entry

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


/*
 * Reload gs selector with exception handling
 * edi:  new selector
 */
ENTRY(native_load_gs_index)
	FRAME_BEGIN
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	popfq
	FRAME_END
	ret
ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	ENTER_IRQ_STACK regs=0 old_rsp=%r11
	call	__do_softirq
	LEAVE_IRQ_STACK regs=0
	leaveq
	ret
ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct *pt_regs) */

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	ENTER_IRQ_STACK old_rsp=%r10
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	UNWIND_HINT_EMPTY
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	UNWIND_HINT_IRET_REGS offset=8
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	UNWIND_HINT_IRET_REGS
	pushq	$-1 /* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xennmi			do_nmi			has_error_code=0
idtentry xendebug		do_debug		has_error_code=0
idtentry xenint3		do_int3			has_error_code=0
#endif

idtentry general_protection	do_general_protection	has_error_code=1
idtentry page_fault		do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx

1:
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	UNWIND_HINT_REGS
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	.Lparanoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	RESTORE_CR3	save_reg=%r14
	SWAPGS_UNSAFE_STACK
	jmp	.Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
.Lparanoid_exit_restore:
	jmp	restore_regs_and_return_to_kernel
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	UNWIND_HINT_FUNC
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	/* We have user CR3.  Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

.Lerror_entry_from_usermode_after_swapgs:
	/* Put us onto the real thread stack. */
	popq	%r12				/* save return addr in %r12 */
	movq	%rsp, %rdi			/* arg0 = pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */
	ENCODE_FRAME_POINTER
	pushq	%r12

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs.  Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode. Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
	jmp	.Lerror_entry_done

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user
	 * gsbase and CR3.  Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)
1394 | ||
1395 | ||
539f5113 | 1396 | /* |
75ca5b22 | 1397 | * On entry, EBX is a "return to kernel mode" flag: |
539f5113 AL |
1398 | * 1: already in kernel mode, don't need SWAPGS |
1399 | * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode | |
1400 | */ | |
ddeb8f21 | 1401 | ENTRY(error_exit) |
8c1f7558 | 1402 | UNWIND_HINT_REGS |
2140a994 | 1403 | DISABLE_INTERRUPTS(CLBR_ANY) |
ddeb8f21 | 1404 | TRACE_IRQS_OFF |
2140a994 | 1405 | testl %ebx, %ebx |
4d732138 IM |
1406 | jnz retint_kernel |
1407 | jmp retint_user | |
ddeb8f21 AH |
1408 | END(error_exit) |
1409 | ||
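A tiny hedged model of error_exit's dispatch on that flag; retint_kernel and retint_user are asm labels elsewhere in this file, represented here as stubs:

#include <stdio.h>

static void retint_kernel(void) { puts("return to kernel"); }
static void retint_user(void)   { puts("return to user");   }

/* EBX carries error_entry's verdict: nonzero means we never left kernel
 * mode, so no SWAPGS or return-to-user work is needed. */
static void error_exit_model(int ebx)
{
        if (ebx)
                retint_kernel();
        else
                retint_user();
}

int main(void)
{
        error_exit_model(1);
        error_exit_model(0);
        return 0;
}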
929bacec AL |
1410 | /* |
1411 | * Runs on exception stack. Xen PV does not go through this path at all, | |
1412 | * so we can use real assembly here. | |
8a09317b DH |
1413 | * |
1414 | * Registers: | |
1415 | * %r14: Used to save/restore the CR3 of the interrupted context | |
1416 | * when PAGE_TABLE_ISOLATION is in use. Do not clobber. | |
929bacec | 1417 | */ |
ddeb8f21 | 1418 | ENTRY(nmi) |
8c1f7558 | 1419 | UNWIND_HINT_IRET_REGS |
929bacec | 1420 | |
3f3c8b8c SR |
1421 | /* |
1422 | * We allow breakpoints in NMIs. If a breakpoint occurs, then | |
1423 | * the iretq it performs will take us out of NMI context. | |
1424 | * This means that we can have nested NMIs where the next | |
1425 | * NMI is using the top of the stack of the previous NMI. We | |
1426 | * can't let it execute because the nested NMI will corrupt the | |
1427 | * stack of the previous NMI. NMI handlers are not re-entrant | |
1428 | * anyway. | |
1429 | * | |
1430 | * To handle this case we do the following: | |
1431 | * Check a special location on the stack that contains |
1432 | * a variable that is set when NMIs are executing. | |
1433 | * The interrupted task's stack is also checked to see if it | |
1434 | * is an NMI stack. | |
1435 | * If the variable is not set and the stack is not the NMI | |
1436 | * stack then: | |
1437 | * o Set the special variable on the stack | |
0b22930e AL |
1438 | * o Copy the interrupt frame into an "outermost" location on the |
1439 | * stack | |
1440 | * o Copy the interrupt frame into an "iret" location on the stack | |
3f3c8b8c SR |
1441 | * o Continue processing the NMI |
1442 | * If the variable is set or the previous stack is the NMI stack: | |
0b22930e | 1443 | * o Modify the "iret" location to jump to the repeat_nmi |
3f3c8b8c SR |
1444 | * o Return to the first NMI |
1445 | * | |
1446 | * Now on exit of the first NMI, we first clear the stack variable. |
1447 | * The NMI stack will tell any NMI nesting at that point that it is |
1448 | * nested. Then we pop the stack normally with iret, and if there was |
1449 | * a nested NMI that updated the copied interrupt stack frame, a |
1450 | * jump will be made to the repeat_nmi code, which will handle the |
1451 | * second NMI. |
9b6e6a83 AL |
1452 | * |
1453 | * However, espfix prevents us from directly returning to userspace | |
1454 | * with a single IRET instruction. Similarly, IRET to user mode | |
1455 | * can fault. We therefore handle NMIs from user space like | |
1456 | * other IST entries. | |
3f3c8b8c SR |
1457 | */ |
1458 | ||
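As a loose user-space model of the protocol just described (deliberately simplified: the real loop runs by IRET-ing back into repeat_nmi rather than by a C loop), the outer handler owns the "outermost" copy and the "NMI executing" variable, while a nested NMI may only redirect the "iret" copy. All names and addresses below are illustrative:

#include <stdint.h>
#include <stdio.h>

/* The five-word hardware frame: RIP, CS, RFLAGS, RSP, SS. */
struct iret_frame { uint64_t ip, cs, flags, sp, ss; };

#define REPEAT_NMI_ADDR 0x1000          /* stand-in for the repeat_nmi label */

static uint64_t nmi_executing;          /* the "NMI executing" variable */
static struct iret_frame iret_copy;     /* consumed by the final IRETQ  */
static struct iret_frame outermost;     /* the real return target       */
static int handled;

/* A nested NMI must leave "outermost" alone; it only points the "iret"
 * copy at repeat_nmi to force another iteration. */
static void nested_nmi(void)
{
        iret_copy.ip = REPEAT_NMI_ADDR;
}

static void do_nmi(void)
{
        if (handled++ == 0)
                nested_nmi();           /* simulate a nested NMI arriving */
}

static void outer_nmi(const struct iret_frame *original)
{
        outermost = *original;          /* "original" -> "outermost" */
        do {
                nmi_executing = 1;      /* (re)set at repeat_nmi      */
                iret_copy = outermost;  /* "outermost" -> "iret"      */
                do_nmi();
                nmi_executing = 0;
                /* IRETQ through iret_copy: either returns for real or
                 * lands back at repeat_nmi for another pass. */
        } while (iret_copy.ip == REPEAT_NMI_ADDR);
}

int main(void)
{
        struct iret_frame original = { .ip = 0xdead0000 };

        outer_nmi(&original);
        printf("ran do_nmi %d times, returning to %#llx\n",
               handled, (unsigned long long)iret_copy.ip);
        return 0;
}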
e93c1730 AL |
1459 | ASM_CLAC |
1460 | ||
146b2b09 | 1461 | /* Use %rdx as our temp variable throughout */ |
4d732138 | 1462 | pushq %rdx |
3f3c8b8c | 1463 | |
9b6e6a83 AL |
1464 | testb $3, CS-RIP+8(%rsp) |
1465 | jz .Lnmi_from_kernel | |
1466 | ||
1467 | /* | |
1468 | * NMI from user mode. We need to run on the thread stack, but we | |
1469 | * can't go through the normal entry paths: NMIs are masked, and | |
1470 | * we don't want to enable interrupts, because then we'll end | |
1471 | * up in an awkward situation in which IRQs are on but NMIs | |
1472 | * are off. | |
83c133cf AL |
1473 | * |
1474 | * We also must not push anything to the stack before switching | |
1475 | * stacks lest we corrupt the "NMI executing" variable. | |
9b6e6a83 AL |
1476 | */ |
1477 | ||
929bacec | 1478 | swapgs |
9b6e6a83 | 1479 | cld |
8a09317b | 1480 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx |
9b6e6a83 AL |
1481 | movq %rsp, %rdx |
1482 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | |
8c1f7558 | 1483 | UNWIND_HINT_IRET_REGS base=%rdx offset=8 |
9b6e6a83 AL |
1484 | pushq 5*8(%rdx) /* pt_regs->ss */ |
1485 | pushq 4*8(%rdx) /* pt_regs->rsp */ | |
1486 | pushq 3*8(%rdx) /* pt_regs->flags */ | |
1487 | pushq 2*8(%rdx) /* pt_regs->cs */ | |
1488 | pushq 1*8(%rdx) /* pt_regs->rip */ | |
8c1f7558 | 1489 | UNWIND_HINT_IRET_REGS |
9b6e6a83 AL |
1490 | pushq $-1 /* pt_regs->orig_ax */ |
1491 | pushq %rdi /* pt_regs->di */ | |
1492 | pushq %rsi /* pt_regs->si */ | |
1493 | pushq (%rdx) /* pt_regs->dx */ | |
1494 | pushq %rcx /* pt_regs->cx */ | |
1495 | pushq %rax /* pt_regs->ax */ | |
1496 | pushq %r8 /* pt_regs->r8 */ | |
1497 | pushq %r9 /* pt_regs->r9 */ | |
1498 | pushq %r10 /* pt_regs->r10 */ | |
1499 | pushq %r11 /* pt_regs->r11 */ | |
1500 | pushq %rbx /* pt_regs->rbx */ | |
1501 | pushq %rbp /* pt_regs->rbp */ | |
1502 | pushq %r12 /* pt_regs->r12 */ | |
1503 | pushq %r13 /* pt_regs->r13 */ | |
1504 | pushq %r14 /* pt_regs->r14 */ | |
1505 | pushq %r15 /* pt_regs->r15 */ | |
8c1f7558 | 1506 | UNWIND_HINT_REGS |
946c1911 | 1507 | ENCODE_FRAME_POINTER |
9b6e6a83 AL |
1508 | |
1509 | /* | |
1510 | * At this point we no longer need to worry about stack damage | |
1511 | * due to nesting -- we're on the normal thread stack and we're | |
1512 | * done with the NMI stack. | |
1513 | */ | |
1514 | ||
1515 | movq %rsp, %rdi | |
1516 | movq $-1, %rsi | |
1517 | call do_nmi | |
1518 | ||
45d5a168 | 1519 | /* |
9b6e6a83 | 1520 | * Return back to user mode. We must *not* do the normal exit |
946c1911 | 1521 | * work, because we don't want to enable interrupts. |
45d5a168 | 1522 | */ |
8a055d7f | 1523 | jmp swapgs_restore_regs_and_return_to_usermode |
45d5a168 | 1524 | |
9b6e6a83 | 1525 | .Lnmi_from_kernel: |
3f3c8b8c | 1526 | /* |
0b22930e AL |
1527 | * Here's what our stack frame will look like: |
1528 | * +---------------------------------------------------------+ | |
1529 | * | original SS | | |
1530 | * | original Return RSP | | |
1531 | * | original RFLAGS | | |
1532 | * | original CS | | |
1533 | * | original RIP | | |
1534 | * +---------------------------------------------------------+ | |
1535 | * | temp storage for rdx | | |
1536 | * +---------------------------------------------------------+ | |
1537 | * | "NMI executing" variable | | |
1538 | * +---------------------------------------------------------+ | |
1539 | * | iret SS } Copied from "outermost" frame | | |
1540 | * | iret Return RSP } on each loop iteration; overwritten | | |
1541 | * | iret RFLAGS } by a nested NMI to force another | | |
1542 | * | iret CS } iteration if needed. | | |
1543 | * | iret RIP } | | |
1544 | * +---------------------------------------------------------+ | |
1545 | * | outermost SS } initialized in first_nmi; | | |
1546 | * | outermost Return RSP } will not be changed before | | |
1547 | * | outermost RFLAGS } NMI processing is done. | | |
1548 | * | outermost CS } Copied to "iret" frame on each | | |
1549 | * | outermost RIP } iteration. | | |
1550 | * +---------------------------------------------------------+ | |
1551 | * | pt_regs | | |
1552 | * +---------------------------------------------------------+ | |
1553 | * | |
1554 | * The "original" frame is used by hardware. Before re-enabling | |
1555 | * NMIs, we need to be done with it, and we need to leave enough | |
1556 | * space for the asm code here. | |
1557 | * | |
1558 | * We return by executing IRET while RSP points to the "iret" frame. | |
1559 | * That will either return for real or it will loop back into NMI | |
1560 | * processing. | |
1561 | * | |
1562 | * The "outermost" frame is copied to the "iret" frame on each | |
1563 | * iteration of the loop, so each iteration starts with the "iret" | |
1564 | * frame pointing to the final return target. | |
1565 | */ | |
1566 | ||
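The same layout expressed as a C struct, fields ordered from low address to high (the diagram above is drawn with high addresses at the top). Purely illustrative; the kernel defines no such type:

#include <stdint.h>

struct iret_frame { uint64_t ip, cs, flags, sp, ss; };

/* Low addresses first; pt_regs (with orig_ax on top) sits just below. */
struct nmi_stack_tail {
        struct iret_frame outermost;    /* real return target; stable while
                                         * NMI processing is in progress  */
        struct iret_frame iret;         /* consumed by the final IRETQ; a
                                         * nested NMI may redirect its ip */
        uint64_t nmi_executing;         /* the "NMI executing" variable   */
        uint64_t saved_rdx;             /* temp storage for rdx           */
        struct iret_frame original;     /* pushed by hardware at NMI entry
                                         * (highest addresses)            */
};

int main(void)
{
        /* 17 eight-byte words, matching the diagram above pt_regs. */
        return sizeof(struct nmi_stack_tail) != 17 * 8;
}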
45d5a168 | 1567 | /* |
0b22930e AL |
1568 | * Determine whether we're a nested NMI. |
1569 | * | |
a27507ca AL |
1570 | * If we interrupted kernel code between repeat_nmi and |
1571 | * end_repeat_nmi, then we are a nested NMI. We must not | |
1572 | * modify the "iret" frame because it's being written by | |
1573 | * the outer NMI. That's okay; the outer NMI handler is | |
1574 | * about to call do_nmi anyway, so we can just |
1575 | * resume the outer NMI. | |
45d5a168 | 1576 | */ |
a27507ca AL |
1577 | |
1578 | movq $repeat_nmi, %rdx | |
1579 | cmpq 8(%rsp), %rdx | |
1580 | ja 1f | |
1581 | movq $end_repeat_nmi, %rdx | |
1582 | cmpq 8(%rsp), %rdx | |
1583 | ja nested_nmi_out | |
1584 | 1: | |
45d5a168 | 1585 | |
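The two compares above implement a range test on the interrupted RIP, which sits at 8(%rsp) just above the saved rdx. A hedged C equivalent; in_repeat_window() and its parameters stand in for the label addresses:

#include <stdbool.h>
#include <stdint.h>

/* Nested iff repeat_nmi <= rip < end_repeat_nmi: "ja 1f" skips out when
 * repeat_nmi > rip, "ja nested_nmi_out" resumes when end_repeat_nmi > rip. */
static bool in_repeat_window(uint64_t rip, uint64_t repeat_nmi,
                             uint64_t end_repeat_nmi)
{
        return rip >= repeat_nmi && rip < end_repeat_nmi;
}

int main(void)
{
        return !in_repeat_window(0x1008, 0x1000, 0x1040);       /* 0 = nested */
}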
3f3c8b8c | 1586 | /* |
a27507ca | 1587 | * Now check "NMI executing". If it's set, then we're nested. |
0b22930e AL |
1588 | * This will not detect if we interrupted an outer NMI just |
1589 | * before IRET. | |
3f3c8b8c | 1590 | */ |
4d732138 IM |
1591 | cmpl $1, -8(%rsp) |
1592 | je nested_nmi | |
3f3c8b8c SR |
1593 | |
1594 | /* | |
0b22930e AL |
1595 | * Now test if the previous stack was an NMI stack. This covers |
1596 | * the case where we interrupt an outer NMI after it clears | |
810bc075 AL |
1597 | * "NMI executing" but before IRET. We need to be careful, though: |
1598 | * there is one case in which RSP could point to the NMI stack | |
1599 | * despite there being no NMI active: naughty userspace controls | |
1600 | * RSP at the very beginning of the SYSCALL targets. We can | |
1601 | * pull a fast one on naughty userspace, though: we program | |
1602 | * SYSCALL to mask DF, so userspace cannot cause DF to be set | |
1603 | * if it controls the kernel's RSP. We set DF before we clear | |
1604 | * "NMI executing". | |
3f3c8b8c | 1605 | */ |
0784b364 DV |
1606 | lea 6*8(%rsp), %rdx |
1607 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ | |
1608 | cmpq %rdx, 4*8(%rsp) | |
1609 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ | |
1610 | ja first_nmi | |
4d732138 | 1611 | |
0784b364 DV |
1612 | subq $EXCEPTION_STKSZ, %rdx |
1613 | cmpq %rdx, 4*8(%rsp) | |
1614 | /* If it is below the NMI stack, it is a normal NMI */ | |
1615 | jb first_nmi | |
810bc075 AL |
1616 | |
1617 | /* Ah, it is within the NMI stack. */ | |
1618 | ||
1619 | testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) | |
1620 | jz first_nmi /* RSP was user controlled. */ | |
1621 | ||
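A hedged sketch of the stack-range and DF tests above: the interrupted RSP (at 4*8(%rsp)) must lie within the EXCEPTION_STKSZ-sized window ending at the NMI stack top computed by the lea, and DF in the saved RFLAGS separates a genuine outer NMI from userspace that merely pointed RSP at the NMI stack before SYSCALL. The function name and the stack size below are illustrative:

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_DF   0x400UL         /* direction flag, bit 10  */
#define EXCEPTION_STKSZ 4096UL          /* illustrative size only  */

/* Nested only if prev_rsp lies in [stack_top - EXCEPTION_STKSZ, stack_top]
 * ("ja first_nmi" above the window, "jb first_nmi" below it) AND DF is
 * set: SYSCALL masks DF, so only the NMI exit path can run here with DF on. */
static bool nested_on_nmi_stack(uint64_t prev_rsp, uint64_t stack_top,
                                uint64_t saved_rflags)
{
        if (prev_rsp > stack_top)                       /* above the stack */
                return false;
        if (prev_rsp < stack_top - EXCEPTION_STKSZ)     /* below the stack */
                return false;
        return saved_rflags & X86_EFLAGS_DF;
}

int main(void)
{
        return !nested_on_nmi_stack(0x9000, 0x9100, X86_EFLAGS_DF);
}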
1622 | /* This is a nested NMI. */ | |
0784b364 | 1623 | |
3f3c8b8c SR |
1624 | nested_nmi: |
1625 | /* | |
0b22930e AL |
1626 | * Modify the "iret" frame to point to repeat_nmi, forcing another |
1627 | * iteration of NMI handling. | |
3f3c8b8c | 1628 | */ |
23a781e9 | 1629 | subq $8, %rsp |
4d732138 IM |
1630 | leaq -10*8(%rsp), %rdx |
1631 | pushq $__KERNEL_DS | |
1632 | pushq %rdx | |
131484c8 | 1633 | pushfq |
4d732138 IM |
1634 | pushq $__KERNEL_CS |
1635 | pushq $repeat_nmi | |
3f3c8b8c SR |
1636 | |
1637 | /* Put stack back */ | |
4d732138 | 1638 | addq $(6*8), %rsp |
3f3c8b8c SR |
1639 | |
1640 | nested_nmi_out: | |
4d732138 | 1641 | popq %rdx |
3f3c8b8c | 1642 | |
0b22930e | 1643 | /* We are returning to kernel mode, so this cannot result in a fault. */ |
929bacec | 1644 | iretq |
3f3c8b8c SR |
1645 | |
1646 | first_nmi: | |
0b22930e | 1647 | /* Restore rdx. */ |
4d732138 | 1648 | movq (%rsp), %rdx |
62610913 | 1649 | |
36f1a77b AL |
1650 | /* Make room for "NMI executing". */ |
1651 | pushq $0 | |
3f3c8b8c | 1652 | |
0b22930e | 1653 | /* Leave room for the "iret" frame */ |
4d732138 | 1654 | subq $(5*8), %rsp |
28696f43 | 1655 | |
0b22930e | 1656 | /* Copy the "original" frame to the "outermost" frame */ |
3f3c8b8c | 1657 | .rept 5 |
4d732138 | 1658 | pushq 11*8(%rsp) |
3f3c8b8c | 1659 | .endr |
8c1f7558 | 1660 | UNWIND_HINT_IRET_REGS |
62610913 | 1661 | |
79fb4ad6 SR |
1662 | /* Everything up to here is safe from nested NMIs */ |
1663 | ||
a97439aa AL |
1664 | #ifdef CONFIG_DEBUG_ENTRY |
1665 | /* | |
1666 | * For ease of testing, unmask NMIs right away. Disabled by | |
1667 | * default because IRET is very expensive. | |
1668 | */ | |
1669 | pushq $0 /* SS */ | |
1670 | pushq %rsp /* RSP (minus 8 because of the previous push) */ | |
1671 | addq $8, (%rsp) /* Fix up RSP */ | |
1672 | pushfq /* RFLAGS */ | |
1673 | pushq $__KERNEL_CS /* CS */ | |
1674 | pushq $1f /* RIP */ | |
929bacec | 1675 | iretq /* continues at repeat_nmi below */ |
8c1f7558 | 1676 | UNWIND_HINT_IRET_REGS |
a97439aa AL |
1677 | 1: |
1678 | #endif | |
1679 | ||
0b22930e | 1680 | repeat_nmi: |
62610913 JB |
1681 | /* |
1682 | * If there was a nested NMI, the first NMI's iret will return | |
1683 | * here. But NMIs are still enabled and we can take another | |
1684 | * nested NMI. The nested NMI checks the interrupted RIP to see | |
1685 | * if it is between repeat_nmi and end_repeat_nmi, and if so | |
1686 | * it will just return, as we are about to repeat an NMI anyway. | |
1687 | * This makes it safe to copy to the stack frame that a nested | |
1688 | * NMI will update. | |
0b22930e AL |
1689 | * |
1690 | * RSP is pointing to "outermost RIP". gsbase is unknown, but, if | |
1691 | * we're repeating an NMI, gsbase has the same value that it had on | |
1692 | * the first iteration. paranoid_entry will load the kernel | |
36f1a77b AL |
1693 | * gsbase if needed before we call do_nmi. "NMI executing" |
1694 | * is zero. | |
62610913 | 1695 | */ |
36f1a77b | 1696 | movq $1, 10*8(%rsp) /* Set "NMI executing". */ |
3f3c8b8c | 1697 | |
62610913 | 1698 | /* |
0b22930e AL |
1699 | * Copy the "outermost" frame to the "iret" frame. NMIs that nest |
1700 | * here must not modify the "iret" frame while we're writing to | |
1701 | * it or it will end up containing garbage. | |
62610913 | 1702 | */ |
4d732138 | 1703 | addq $(10*8), %rsp |
3f3c8b8c | 1704 | .rept 5 |
4d732138 | 1705 | pushq -6*8(%rsp) |
3f3c8b8c | 1706 | .endr |
4d732138 | 1707 | subq $(5*8), %rsp |
62610913 | 1708 | end_repeat_nmi: |
3f3c8b8c SR |
1709 | |
1710 | /* | |
0b22930e AL |
1711 | * Everything below this point can be preempted by a nested NMI. |
1712 | * If this happens, then the inner NMI will change the "iret" | |
1713 | * frame to point back to repeat_nmi. | |
3f3c8b8c | 1714 | */ |
4d732138 | 1715 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
76f5df43 DV |
1716 | ALLOC_PT_GPREGS_ON_STACK |
1717 | ||
1fd466ef | 1718 | /* |
ebfc453e | 1719 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
1fd466ef SR |
1720 | * as we should not be calling schedule in NMI context, |
1721 | * even with normal interrupts enabled. An NMI should not be |
1722 | * setting NEED_RESCHED or anything that normal interrupts and | |
1723 | * exceptions might do. | |
1724 | */ | |
4d732138 | 1725 | call paranoid_entry |
8c1f7558 | 1726 | UNWIND_HINT_REGS |
7fbb98c5 | 1727 | |
ddeb8f21 | 1728 | /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ |
4d732138 IM |
1729 | movq %rsp, %rdi |
1730 | movq $-1, %rsi | |
1731 | call do_nmi | |
7fbb98c5 | 1732 | |
8a09317b DH |
1733 | RESTORE_CR3 save_reg=%r14 |
1734 | ||
4d732138 IM |
1735 | testl %ebx, %ebx /* swapgs needed? */ |
1736 | jnz nmi_restore | |
ddeb8f21 AH |
1737 | nmi_swapgs: |
1738 | SWAPGS_UNSAFE_STACK | |
1739 | nmi_restore: | |
471ee483 AL |
1740 | POP_EXTRA_REGS |
1741 | POP_C_REGS | |
0b22930e | 1742 | |
471ee483 AL |
1743 | /* |
1744 | * Skip orig_ax and the "outermost" frame to point RSP at the |
1745 | * "iret" frame. |
1746 | */ | |
1747 | addq $6*8, %rsp | |
28696f43 | 1748 | |
810bc075 AL |
1749 | /* |
1750 | * Clear "NMI executing". Set DF first so that we can easily | |
1751 | * distinguish the remaining code between here and IRET from | |
929bacec AL |
1752 | * the SYSCALL entry and exit paths. |
1753 | * | |
1754 | * We arguably should just inspect RIP instead, but I (Andy) wrote | |
1755 | * this code when I had the misapprehension that Xen PV supported | |
1756 | * NMIs, and Xen PV would break that approach. | |
810bc075 AL |
1757 | */ |
1758 | std | |
1759 | movq $0, 5*8(%rsp) /* clear "NMI executing" */ | |
0b22930e AL |
1760 | |
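The ordering above is the point: DF goes up before "NMI executing" goes down, so a nested NMI arriving between the clear and the IRETQ (variable zero, RSP still on the NMI stack) is still recognized by the DF test earlier in this file. A hedged sketch of that window; the struct and function are illustrative:

#include <stdbool.h>
#include <stdint.h>

struct cpu_model { bool df; uint64_t nmi_executing; };

/* Model of the exit tail: between the two stores and the final IRETQ,
 * "NMI executing" is zero but DF is set, which the nested-NMI stack check
 * accepts as proof of an in-flight NMI (userspace cannot fake it because
 * SYSCALL is programmed to mask DF). */
static void nmi_exit_tail(struct cpu_model *c)
{
        c->df = true;           /* std                                 */
        c->nmi_executing = 0;   /* movq $0, 5*8(%rsp)                  */
        c->df = false;          /* iretq restores RFLAGS, clearing DF  */
}

int main(void)
{
        struct cpu_model c = { .df = false, .nmi_executing = 1 };

        nmi_exit_tail(&c);
        return c.nmi_executing != 0 || c.df;
}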
1761 | /* | |
929bacec AL |
1762 | * iretq reads the "iret" frame and exits the NMI stack in a |
1763 | * single instruction. We are returning to kernel mode, so this | |
1764 | * cannot result in a fault. Similarly, we don't need to worry | |
1765 | * about espfix64 on the way back to kernel mode. | |
0b22930e | 1766 | */ |
929bacec | 1767 | iretq |
ddeb8f21 AH |
1768 | END(nmi) |
1769 | ||
1770 | ENTRY(ignore_sysret) | |
8c1f7558 | 1771 | UNWIND_HINT_EMPTY |
4d732138 | 1772 | mov $-ENOSYS, %eax |
ddeb8f21 | 1773 | sysret |
ddeb8f21 | 1774 | END(ignore_sysret) |
2deb4be2 AL |
1775 | |
1776 | ENTRY(rewind_stack_do_exit) | |
8c1f7558 | 1777 | UNWIND_HINT_FUNC |
2deb4be2 AL |
1778 | /* Prevent any naive code from trying to unwind to our caller. */ |
1779 | xorl %ebp, %ebp | |
1780 | ||
1781 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rax | |
8c1f7558 JP |
1782 | leaq -PTREGS_SIZE(%rax), %rsp |
1783 | UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE | |
2deb4be2 AL |
1784 | |
1785 | call do_exit | |
2deb4be2 | 1786 | END(rewind_stack_do_exit) |