1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * linux/arch/x86_64/entry.S
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
8 *
9 * entry.S contains the system-call and fault low-level handling routines.
10 *
11 * Some of this is documented in Documentation/arch/x86/entry_64.rst
12 *
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
15 * at the top of the kernel process stack.
16 *
17 * Some macro usage:
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
20 */
21 #include <linux/linkage.h>
22 #include <asm/segment.h>
23 #include <asm/cache.h>
24 #include <asm/errno.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/msr.h>
27 #include <asm/unistd.h>
28 #include <asm/thread_info.h>
29 #include <asm/hw_irq.h>
30 #include <asm/page_types.h>
31 #include <asm/irqflags.h>
32 #include <asm/paravirt.h>
33 #include <asm/percpu.h>
34 #include <asm/asm.h>
35 #include <asm/smap.h>
36 #include <asm/pgtable_types.h>
37 #include <asm/export.h>
38 #include <asm/frame.h>
39 #include <asm/trapnr.h>
40 #include <asm/nospec-branch.h>
41 #include <asm/fsgsbase.h>
42 #include <linux/err.h>
43
44 #include "calling.h"
45
46 .code64
47 .section .entry.text, "ax"
48
49 /*
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
51 *
52 * This is the only entry point used for 64-bit system calls. The
53 * hardware interface is reasonably well designed and the register to
54 * argument mapping Linux uses fits well with the registers that are
55 * available when SYSCALL is used.
56 *
57 * SYSCALL instructions can be found inlined in libc implementations as
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
60 * clock_gettimeofday fallback.
61 *
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
63 * then loads new ss, cs, and rip from previously programmed MSRs.
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
65 * are not needed). SYSCALL does not save anything on the stack
66 * and does not change rsp.
67 *
68 * Registers on entry:
69 * rax system call number
70 * rcx return address
71 * r11 saved rflags (note: r11 is callee-clobbered register in C ABI)
72 * rdi arg0
73 * rsi arg1
74 * rdx arg2
75 * r10 arg3 (needs to be moved to rcx to conform to C ABI)
76 * r8 arg4
77 * r9 arg5
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
79 *
80 * Only called from user space.
81 *
82 * When the user can change pt_regs->foo, always force IRET. That is because
83 * it deals with non-canonical addresses better. SYSRET has trouble
84 * with them due to bugs in both AMD and Intel CPUs.
85 */
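/*
 * Illustrative example (assuming a glibc-style caller): write(1, buf, count)
 * arrives here with rax = __NR_write (1), rdi = 1 (fd), rsi = buf,
 * rdx = count, rcx = the user return address and r11 = the saved rflags,
 * exactly as listed above.
 */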
86
87 SYM_CODE_START(entry_SYSCALL_64)
88 UNWIND_HINT_ENTRY
89 ENDBR
90
91 swapgs
92 /* tss.sp2 is scratch space. */
93 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
94 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
95 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
96
97 SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
98 ANNOTATE_NOENDBR
99
100 /* Construct struct pt_regs on stack */
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
106 SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
107 pushq %rax /* pt_regs->orig_ax */
108
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
110
111 /* IRQs are off. */
112 movq %rsp, %rdi
113 /* Sign extend the lower 32 bits as syscall numbers are treated as int */
114 movslq %eax, %rsi
115
116 /* clobbers %rax, make sure it is after saving the syscall nr */
117 IBRS_ENTER
118 UNTRAIN_RET
119
120 call do_syscall_64 /* returns with IRQs disabled */
121
122 /*
123 * Try to use SYSRET instead of IRET if we're returning to
124 * a completely clean 64-bit userspace context. If we're not,
125 * go to the slow exit path.
126 * In the Xen PV case we must use iret anyway.
127 */
128
129 ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
130 X86_FEATURE_XENPV
131
132 movq RCX(%rsp), %rcx
133 movq RIP(%rsp), %r11
134
135 cmpq %rcx, %r11 /* SYSRET requires RCX == RIP */
136 jne swapgs_restore_regs_and_return_to_usermode
137
138 /*
139 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
140 * in kernel space. This essentially lets the user take over
141 * the kernel, since userspace controls RSP.
142 *
143 * If the width of the "canonical tail" ever becomes variable, this will need
144 * to be updated to remain correct on both old and new CPUs.
145 *
146 * Change top bits to match most significant bit (47th or 56th bit
147 * depending on paging mode) in the address.
148 */
149 #ifdef CONFIG_X86_5LEVEL
150 ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
151 "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
152 #else
153 shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
154 sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
155 #endif
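/*
 * Worked example (illustrative, assuming 4-level paging and thus a 48-bit
 * canonical width): shl $16; sar $16 replicates bit 47 into bits 63:48,
 * so a non-canonical RCX such as 0x1234800000000000 becomes
 * 0xffff800000000000 and the cmpq below takes the slow IRET path.
 */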
156
157 /* If this changed %rcx, it was not canonical */
158 cmpq %rcx, %r11
159 jne swapgs_restore_regs_and_return_to_usermode
160
161 cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */
162 jne swapgs_restore_regs_and_return_to_usermode
163
164 movq R11(%rsp), %r11
165 cmpq %r11, EFLAGS(%rsp) /* R11 == RFLAGS */
166 jne swapgs_restore_regs_and_return_to_usermode
167
168 /*
169 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
170 * restore RF properly. If the slowpath sets it for whatever reason, we
171 * need to restore it correctly.
172 *
173 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
174 * trap from userspace immediately after SYSRET. This would cause an
175 * infinite loop whenever #DB happens with register state that satisfies
176 * the opportunistic SYSRET conditions. For example, single-stepping
177 * this user code:
178 *
179 * movq $stuck_here, %rcx
180 * pushfq
181 * popq %r11
182 * stuck_here:
183 *
184 * would never get past 'stuck_here'.
185 */
186 testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
187 jnz swapgs_restore_regs_and_return_to_usermode
188
189 /* nothing to check for RSP */
190
191 cmpq $__USER_DS, SS(%rsp) /* SS must match SYSRET */
192 jne swapgs_restore_regs_and_return_to_usermode
193
194 /*
195 * We win! This label is here just for ease of understanding
196 * perf profiles. Nothing jumps here.
197 */
198 syscall_return_via_sysret:
199 IBRS_EXIT
200 POP_REGS pop_rdi=0
201
202 /*
203 * Now all regs are restored except RSP and RDI.
204 * Save old stack pointer and switch to trampoline stack.
205 */
206 movq %rsp, %rdi
207 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
208 UNWIND_HINT_END_OF_STACK
209
210 pushq RSP-RDI(%rdi) /* RSP */
211 pushq (%rdi) /* RDI */
212
213 /*
214 * We are on the trampoline stack. All regs except RDI are live.
215 * We can do future final exit work right here.
216 */
217 STACKLEAK_ERASE_NOCLOBBER
218
219 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
220
221 popq %rdi
222 popq %rsp
223 SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
224 ANNOTATE_NOENDBR
225 swapgs
226 sysretq
227 SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
228 ANNOTATE_NOENDBR
229 int3
230 SYM_CODE_END(entry_SYSCALL_64)
231
232 /*
233 * %rdi: prev task
234 * %rsi: next task
235 */
236 .pushsection .text, "ax"
237 SYM_FUNC_START(__switch_to_asm)
238 /*
239 * Save callee-saved registers
240 * This must match the order in inactive_task_frame
241 */
242 pushq %rbp
243 pushq %rbx
244 pushq %r12
245 pushq %r13
246 pushq %r14
247 pushq %r15
248
249 /* switch stack */
250 movq %rsp, TASK_threadsp(%rdi)
251 movq TASK_threadsp(%rsi), %rsp
252
253 #ifdef CONFIG_STACKPROTECTOR
254 movq TASK_stack_canary(%rsi), %rbx
255 movq %rbx, PER_CPU_VAR(fixed_percpu_data) + FIXED_stack_canary
256 #endif
257
258 /*
259 * When switching from a shallower to a deeper call stack
260 * the RSB may either underflow or use entries populated
261 * with userspace addresses. On CPUs where those concerns
262 * exist, overwrite the RSB with entries which capture
263 * speculative execution to prevent attack.
264 */
265 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
266
267 /* restore callee-saved registers */
268 popq %r15
269 popq %r14
270 popq %r13
271 popq %r12
272 popq %rbx
273 popq %rbp
274
275 jmp __switch_to
276 SYM_FUNC_END(__switch_to_asm)
277 .popsection
278
279 /*
280 * A newly forked process directly context switches into this address.
281 *
282 * rax: prev task we switched from
283 * rbx: kernel thread func (NULL for user thread)
284 * r12: kernel thread arg
285 */
286 .pushsection .text, "ax"
287 __FUNC_ALIGN
288 SYM_CODE_START_NOALIGN(ret_from_fork)
289 UNWIND_HINT_END_OF_STACK
290 ANNOTATE_NOENDBR // copy_thread
291 CALL_DEPTH_ACCOUNT
292 movq %rax, %rdi
293 call schedule_tail /* rdi: 'prev' task parameter */
294
295 testq %rbx, %rbx /* from kernel_thread? */
296 jnz 1f /* kernel threads are uncommon */
297
298 2:
299 UNWIND_HINT_REGS
300 movq %rsp, %rdi
301 call syscall_exit_to_user_mode /* returns with IRQs disabled */
302 jmp swapgs_restore_regs_and_return_to_usermode
303
304 1:
305 /* kernel thread */
306 UNWIND_HINT_END_OF_STACK
307 movq %r12, %rdi
308 CALL_NOSPEC rbx
309 /*
310 * A kernel thread is allowed to return here after successfully
311 * calling kernel_execve(). Exit to userspace to complete the execve()
312 * syscall.
313 */
314 movq $0, RAX(%rsp)
315 jmp 2b
316 SYM_CODE_END(ret_from_fork)
317 .popsection
318
319 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
320 #ifdef CONFIG_DEBUG_ENTRY
321 pushq %rax
322 SAVE_FLAGS
323 testl $X86_EFLAGS_IF, %eax
324 jz .Lokay_\@
325 ud2
326 .Lokay_\@:
327 popq %rax
328 #endif
329 .endm
330
331 SYM_CODE_START(xen_error_entry)
332 ANNOTATE_NOENDBR
333 UNWIND_HINT_FUNC
334 PUSH_AND_CLEAR_REGS save_ret=1
335 ENCODE_FRAME_POINTER 8
336 UNTRAIN_RET_FROM_CALL
337 RET
338 SYM_CODE_END(xen_error_entry)
339
340 /**
341 * idtentry_body - Macro to emit code calling the C function
342 * @cfunc: C function to be called
343 * @has_error_code: Hardware pushed error code on stack
344 */
345 .macro idtentry_body cfunc has_error_code:req
346
347 /*
348 * Call error_entry() and switch to the task stack if from userspace.
349 *
350 * When in XENPV, it is already on the task stack, and it can't fault
351 * in native_iret() or native_load_gs_index() since XENPV uses its
352 * own pvops for IRET and load_gs_index(). It also doesn't need to
353 * switch CR3, so it can skip invoking error_entry().
354 */
355 ALTERNATIVE "call error_entry; movq %rax, %rsp", \
356 "call xen_error_entry", X86_FEATURE_XENPV
357
358 ENCODE_FRAME_POINTER
359 UNWIND_HINT_REGS
360
361 movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
362
363 .if \has_error_code == 1
364 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
365 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
366 .endif
367
368 call \cfunc
369
370 /* For some configurations \cfunc ends up being a noreturn. */
371 REACHABLE
372
373 jmp error_return
374 .endm
375
376 /**
377 * idtentry - Macro to generate entry stubs for simple IDT entries
378 * @vector: Vector number
379 * @asmsym: ASM symbol for the entry point
380 * @cfunc: C function to be called
381 * @has_error_code: Hardware pushed error code on stack
382 *
383 * The macro emits code to set up the kernel context for straightforward
384 * and simple IDT entries. No IST stack, no paranoid entry checks.
385 */
386 .macro idtentry vector asmsym cfunc has_error_code:req
387 SYM_CODE_START(\asmsym)
388
389 .if \vector == X86_TRAP_BP
390 /* #BP advances %rip to the next instruction */
391 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
392 .else
393 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
394 .endif
395
396 ENDBR
397 ASM_CLAC
398 cld
399
400 .if \has_error_code == 0
401 pushq $-1 /* ORIG_RAX: no syscall to restart */
402 .endif
403
404 .if \vector == X86_TRAP_BP
405 /*
406 * If coming from kernel space, create a 6-word gap to allow the
407 * int3 handler to emulate a call instruction.
408 */
409 testb $3, CS-ORIG_RAX(%rsp)
410 jnz .Lfrom_usermode_no_gap_\@
411 .rept 6
412 pushq 5*8(%rsp)
413 .endr
414 UNWIND_HINT_IRET_REGS offset=8
415 .Lfrom_usermode_no_gap_\@:
416 .endif
417
418 idtentry_body \cfunc \has_error_code
419
420 _ASM_NOKPROBE(\asmsym)
421 SYM_CODE_END(\asmsym)
422 .endm
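/*
 * Example expansion (illustrative; the real invocations come from
 * asm/idtentry.h): a divide error has no hardware error code, so its stub
 * is emitted roughly as
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 */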
423
424 /*
425 * Interrupt entry/exit.
426 *
427 * The interrupt stubs push (vector) onto the stack, which is the error_code
428 * position of idtentry exceptions, and jump to one of the two idtentry points
429 * (common/spurious).
430 *
431 * common_interrupt is a hotpath, align it to a cache line
432 */
433 .macro idtentry_irq vector cfunc
434 .p2align CONFIG_X86_L1_CACHE_SHIFT
435 idtentry \vector asm_\cfunc \cfunc has_error_code=1
436 .endm
437
438 /*
439 * System vectors which invoke their handlers directly and are not
440 * going through the regular common device interrupt handling code.
441 */
442 .macro idtentry_sysvec vector cfunc
443 idtentry \vector asm_\cfunc \cfunc has_error_code=0
444 .endm
445
446 /**
447 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
448 * @vector: Vector number
449 * @asmsym: ASM symbol for the entry point
450 * @cfunc: C function to be called
451 *
452 * The macro emits code to set up the kernel context for #MC and #DB
453 *
454 * If the entry comes from user space it uses the normal entry path
455 * including the return to user space work and preemption checks on
456 * exit.
457 *
458 * If it hits in kernel mode then it needs to go through the paranoid
459 * entry as the exception can hit any random state. No preemption
460 * check on exit to keep the paranoid path simple.
461 */
462 .macro idtentry_mce_db vector asmsym cfunc
463 SYM_CODE_START(\asmsym)
464 UNWIND_HINT_IRET_ENTRY
465 ENDBR
466 ASM_CLAC
467 cld
468
469 pushq $-1 /* ORIG_RAX: no syscall to restart */
470
471 /*
472 * If the entry is from userspace, switch stacks and treat it as
473 * a normal entry.
474 */
475 testb $3, CS-ORIG_RAX(%rsp)
476 jnz .Lfrom_usermode_switch_stack_\@
477
478 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
479 call paranoid_entry
480
481 UNWIND_HINT_REGS
482
483 movq %rsp, %rdi /* pt_regs pointer */
484
485 call \cfunc
486
487 jmp paranoid_exit
488
489 /* Switch to the regular task stack and use the noist entry point */
490 .Lfrom_usermode_switch_stack_\@:
491 idtentry_body noist_\cfunc, has_error_code=0
492
493 _ASM_NOKPROBE(\asmsym)
494 SYM_CODE_END(\asmsym)
495 .endm
496
497 #ifdef CONFIG_AMD_MEM_ENCRYPT
498 /**
499 * idtentry_vc - Macro to generate entry stub for #VC
500 * @vector: Vector number
501 * @asmsym: ASM symbol for the entry point
502 * @cfunc: C function to be called
503 *
504 * The macro emits code to set up the kernel context for #VC. The #VC handler
505 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
506 *
507 * To make this work the #VC entry code tries its best to pretend it doesn't use
508 * an IST stack by switching to the task stack if coming from user-space (which
509 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
510 * entered from kernel-mode.
511 *
512 * If entered from kernel-mode the return stack is validated first, and if it is
513 * not safe to use (e.g. because it points to the entry stack) the #VC handler
514 * will switch to a fall-back stack (VC2) and call a special handler function.
515 *
516 * The macro is only used for one vector, but it is planned to be extended in
517 * the future for the #HV exception.
518 */
519 .macro idtentry_vc vector asmsym cfunc
520 SYM_CODE_START(\asmsym)
521 UNWIND_HINT_IRET_ENTRY
522 ENDBR
523 ASM_CLAC
524 cld
525
526 /*
527 * If the entry is from userspace, switch stacks and treat it as
528 * a normal entry.
529 */
530 testb $3, CS-ORIG_RAX(%rsp)
531 jnz .Lfrom_usermode_switch_stack_\@
532
533 /*
534 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
535 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
536 */
537 call paranoid_entry
538
539 UNWIND_HINT_REGS
540
541 /*
542 * Switch off the IST stack to make it free for nested exceptions. The
543 * vc_switch_off_ist() function will switch back to the interrupted
544 * stack if it is safe to do so. If not it switches to the VC fall-back
545 * stack.
546 */
547 movq %rsp, %rdi /* pt_regs pointer */
548 call vc_switch_off_ist
549 movq %rax, %rsp /* Switch to new stack */
550
551 ENCODE_FRAME_POINTER
552 UNWIND_HINT_REGS
553
554 /* Update pt_regs */
555 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
556 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
557
558 movq %rsp, %rdi /* pt_regs pointer */
559
560 call kernel_\cfunc
561
562 /*
563 * No need to switch back to the IST stack. The current stack is either
564 * identical to the stack in the IRET frame or the VC fall-back stack,
565 * so it is definitely mapped even with PTI enabled.
566 */
567 jmp paranoid_exit
568
569 /* Switch to the regular task stack */
570 .Lfrom_usermode_switch_stack_\@:
571 idtentry_body user_\cfunc, has_error_code=1
572
573 _ASM_NOKPROBE(\asmsym)
574 SYM_CODE_END(\asmsym)
575 .endm
576 #endif
577
578 /*
579 * Double fault entry. Straight paranoid. No checks on which context
580 * this came from, because for the espfix-induced #DF that would do
581 * the wrong thing.
582 */
583 .macro idtentry_df vector asmsym cfunc
584 SYM_CODE_START(\asmsym)
585 UNWIND_HINT_IRET_ENTRY offset=8
586 ENDBR
587 ASM_CLAC
588 cld
589
590 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
591 call paranoid_entry
592 UNWIND_HINT_REGS
593
594 movq %rsp, %rdi /* pt_regs pointer into first argument */
595 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
596 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
597 call \cfunc
598
599 /* For some configurations \cfunc ends up being a noreturn. */
600 REACHABLE
601
602 jmp paranoid_exit
603
604 _ASM_NOKPROBE(\asmsym)
605 SYM_CODE_END(\asmsym)
606 .endm
607
608 /*
609 * Include the defines which emit the idt entries, which are shared
610 * between 32 and 64 bit, and emit the __irqentry_text_* markers
611 * so the stacktrace boundary checks work.
612 */
613 __ALIGN
614 .globl __irqentry_text_start
615 __irqentry_text_start:
616
617 #include <asm/idtentry.h>
618
619 __ALIGN
620 .globl __irqentry_text_end
621 __irqentry_text_end:
622 ANNOTATE_NOENDBR
623
624 SYM_CODE_START_LOCAL(common_interrupt_return)
625 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
626 IBRS_EXIT
627 #ifdef CONFIG_DEBUG_ENTRY
628 /* Assert that pt_regs indicates user mode. */
629 testb $3, CS(%rsp)
630 jnz 1f
631 ud2
632 1:
633 #endif
634 #ifdef CONFIG_XEN_PV
635 ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
636 #endif
637
638 POP_REGS pop_rdi=0
639
640 /*
641 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
642 * Save old stack pointer and switch to trampoline stack.
643 */
644 movq %rsp, %rdi
645 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
646 UNWIND_HINT_END_OF_STACK
647
648 /* Copy the IRET frame to the trampoline stack. */
649 pushq 6*8(%rdi) /* SS */
650 pushq 5*8(%rdi) /* RSP */
651 pushq 4*8(%rdi) /* EFLAGS */
652 pushq 3*8(%rdi) /* CS */
653 pushq 2*8(%rdi) /* RIP */
654
655 /* Push user RDI on the trampoline stack. */
656 pushq (%rdi)
657
658 /*
659 * We are on the trampoline stack. All regs except RDI are live.
660 * We can do future final exit work right here.
661 */
662 STACKLEAK_ERASE_NOCLOBBER
663
664 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
665
666 /* Restore RDI. */
667 popq %rdi
668 swapgs
669 jmp .Lnative_iret
670
671
672 SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
673 #ifdef CONFIG_DEBUG_ENTRY
674 /* Assert that pt_regs indicates kernel mode. */
675 testb $3, CS(%rsp)
676 jz 1f
677 ud2
678 1:
679 #endif
680 POP_REGS
681 addq $8, %rsp /* skip regs->orig_ax */
682 /*
683 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
684 * when returning from the IPI handler.
685 */
686 #ifdef CONFIG_XEN_PV
687 SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
688 ANNOTATE_NOENDBR
689 .byte 0xe9
690 .long .Lnative_iret - (. + 4)
691 #endif
692
693 .Lnative_iret:
694 UNWIND_HINT_IRET_REGS
695 /*
696 * Are we returning to a stack segment from the LDT? Note: in
697 * 64-bit mode SS:RSP on the exception stack is always valid.
698 */
699 #ifdef CONFIG_X86_ESPFIX64
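/* Bit 2 of the saved SS selector is the Table Indicator: 1 => LDT. */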
700 testb $4, (SS-RIP)(%rsp)
701 jnz native_irq_return_ldt
702 #endif
703
704 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
705 ANNOTATE_NOENDBR // exc_double_fault
706 /*
707 * This may fault. Non-paranoid faults on return to userspace are
708 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
709 * Double-faults due to espfix64 are handled in exc_double_fault.
710 * Other faults here are fatal.
711 */
712 iretq
713
714 #ifdef CONFIG_X86_ESPFIX64
715 native_irq_return_ldt:
716 /*
717 * We are running with user GSBASE. All GPRs contain their user
718 * values. We have a percpu ESPFIX stack that is eight slots
719 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
720 * of the ESPFIX stack.
721 *
722 * We clobber RAX and RDI in this code. We stash RDI on the
723 * normal stack and RAX on the ESPFIX stack.
724 *
725 * The ESPFIX stack layout we set up looks like this:
726 *
727 * --- top of ESPFIX stack ---
728 * SS
729 * RSP
730 * RFLAGS
731 * CS
732 * RIP <-- RSP points here when we're done
733 * RAX <-- espfix_waddr points here
734 * --- bottom of ESPFIX stack ---
735 */
736
737 pushq %rdi /* Stash user RDI */
738 swapgs /* to kernel GS */
739 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
740
741 movq PER_CPU_VAR(espfix_waddr), %rdi
742 movq %rax, (0*8)(%rdi) /* user RAX */
743 movq (1*8)(%rsp), %rax /* user RIP */
744 movq %rax, (1*8)(%rdi)
745 movq (2*8)(%rsp), %rax /* user CS */
746 movq %rax, (2*8)(%rdi)
747 movq (3*8)(%rsp), %rax /* user RFLAGS */
748 movq %rax, (3*8)(%rdi)
749 movq (5*8)(%rsp), %rax /* user SS */
750 movq %rax, (5*8)(%rdi)
751 movq (4*8)(%rsp), %rax /* user RSP */
752 movq %rax, (4*8)(%rdi)
753 /* Now RAX == RSP. */
754
755 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
756
757 /*
758 * espfix_stack[31:16] == 0. The page tables are set up such that
759 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
760 * espfix_waddr for any X. That is, there are 65536 RO aliases of
761 * the same page. Set up RSP so that RSP[31:16] contains the
762 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
763 * still points to an RO alias of the ESPFIX stack.
764 */
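/*
 * Worked example (with a made-up user RSP of 0x00007ffd12345678): the
 * andl above left RAX == 0x12340000, so the orq below produces
 * espfix_stack | 0x12340000, i.e. the RO alias whose bits 31:16 already
 * match the user RSP.
 */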
765 orq PER_CPU_VAR(espfix_stack), %rax
766
767 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
768 swapgs /* to user GS */
769 popq %rdi /* Restore user RDI */
770
771 movq %rax, %rsp
772 UNWIND_HINT_IRET_REGS offset=8
773
774 /*
775 * At this point, we cannot write to the stack any more, but we can
776 * still read.
777 */
778 popq %rax /* Restore user RAX */
779
780 /*
781 * RSP now points to an ordinary IRET frame, except that the page
782 * is read-only and RSP[31:16] are preloaded with the userspace
783 * values. We can now IRET back to userspace.
784 */
785 jmp native_irq_return_iret
786 #endif
787 SYM_CODE_END(common_interrupt_return)
788 _ASM_NOKPROBE(common_interrupt_return)
789
790 /*
791 * Reload gs selector with exception handling
792 * di: new selector
793 *
794 * Is in entry.text as it shouldn't be instrumented.
795 */
796 SYM_FUNC_START(asm_load_gs_index)
797 FRAME_BEGIN
798 swapgs
799 .Lgs_change:
800 ANNOTATE_NOENDBR // error_entry
801 movl %edi, %gs
802 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
803 swapgs
804 FRAME_END
805 RET
806
807 /* running with kernelgs */
808 .Lbad_gs:
809 swapgs /* switch back to user gs */
810 .macro ZAP_GS
811 /* This can't be a string because the preprocessor needs to see it. */
812 movl $__USER_DS, %eax
813 movl %eax, %gs
814 .endm
815 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
816 xorl %eax, %eax
817 movl %eax, %gs
818 jmp 2b
819
820 _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
821
822 SYM_FUNC_END(asm_load_gs_index)
823 EXPORT_SYMBOL(asm_load_gs_index)
824
825 #ifdef CONFIG_XEN_PV
826 /*
827 * A note on the "critical region" in our callback handler.
828 * We want to avoid stacking callback handlers due to events occurring
829 * during handling of the last event. To do this, we keep events disabled
830 * until we've done all processing. HOWEVER, we must enable events before
831 * popping the stack frame (can't be done atomically) and so it would still
832 * be possible to get enough handler activations to overflow the stack.
833 * Although unlikely, bugs of that kind are hard to track down, so we'd
834 * like to avoid the possibility.
835 * So, on entry to the handler we detect whether we interrupted an
836 * existing activation in its critical region -- if so, we pop the current
837 * activation and restart the handler using the previous one.
838 *
839 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
840 */
841 __FUNC_ALIGN
842 SYM_CODE_START_LOCAL_NOALIGN(exc_xen_hypervisor_callback)
843
844 /*
845 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *)
846 * will see the correct pointer to the pt_regs.
847 */
848 UNWIND_HINT_FUNC
849 movq %rdi, %rsp /* we don't return, adjust the stack frame */
850 UNWIND_HINT_REGS
851
852 call xen_pv_evtchn_do_upcall
853
854 jmp error_return
855 SYM_CODE_END(exc_xen_hypervisor_callback)
856
857 /*
858 * Hypervisor uses this for application faults while it executes.
859 * We get here for two reasons:
860 * 1. Fault while reloading DS, ES, FS or GS
861 * 2. Fault while executing IRET
862 * Category 1 we do not need to fix up as Xen has already reloaded all segment
863 * registers that could be reloaded and zeroed the others.
864 * Category 2 we fix up by killing the current process. We cannot use the
865 * normal Linux return path in this case because if we use the IRET hypercall
866 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
867 * We distinguish between categories by comparing each saved segment register
868 * with its current contents: any discrepancy means we are in category 1.
869 */
870 __FUNC_ALIGN
871 SYM_CODE_START_NOALIGN(xen_failsafe_callback)
872 UNWIND_HINT_UNDEFINED
873 ENDBR
874 movl %ds, %ecx
875 cmpw %cx, 0x10(%rsp)
876 jne 1f
877 movl %es, %ecx
878 cmpw %cx, 0x18(%rsp)
879 jne 1f
880 movl %fs, %ecx
881 cmpw %cx, 0x20(%rsp)
882 jne 1f
883 movl %gs, %ecx
884 cmpw %cx, 0x28(%rsp)
885 jne 1f
886 /* All segments match their saved values => Category 2 (Bad IRET). */
887 movq (%rsp), %rcx
888 movq 8(%rsp), %r11
889 addq $0x30, %rsp
890 pushq $0 /* RIP */
891 UNWIND_HINT_IRET_REGS offset=8
892 jmp asm_exc_general_protection
893 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
894 movq (%rsp), %rcx
895 movq 8(%rsp), %r11
896 addq $0x30, %rsp
897 UNWIND_HINT_IRET_REGS
898 pushq $-1 /* orig_ax = -1 => not a system call */
899 PUSH_AND_CLEAR_REGS
900 ENCODE_FRAME_POINTER
901 jmp error_return
902 SYM_CODE_END(xen_failsafe_callback)
903 #endif /* CONFIG_XEN_PV */
904
905 /*
906 * Save all registers in pt_regs. Return GSBASE related information
907 * in EBX depending on the availability of the FSGSBASE instructions:
908 *
909 * FSGSBASE R/EBX
910 * N 0 -> SWAPGS on exit
911 * 1 -> no SWAPGS on exit
912 *
913 * Y GSBASE value at entry, must be restored in paranoid_exit
914 *
915 * R14 - old CR3
916 * R15 - old SPEC_CTRL
917 */
918 SYM_CODE_START(paranoid_entry)
919 ANNOTATE_NOENDBR
920 UNWIND_HINT_FUNC
921 PUSH_AND_CLEAR_REGS save_ret=1
922 ENCODE_FRAME_POINTER 8
923
924 /*
925 * Always stash CR3 in %r14. This value will be restored,
926 * verbatim, at exit. Needed if paranoid_entry interrupted
927 * another entry that already switched to the user CR3 value
928 * but has not yet returned to userspace.
929 *
930 * This is also why CS (stashed in the "iret frame" by the
931 * hardware at entry) can not be used: this may be a return
932 * to kernel code, but with a user CR3 value.
933 *
934 * Switching CR3 does not depend on kernel GSBASE so it can
935 * be done before switching to the kernel GSBASE. This is
936 * required for FSGSBASE because the kernel GSBASE has to
937 * be retrieved from a kernel internal table.
938 */
939 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
940
941 /*
942 * Handling GSBASE depends on the availability of FSGSBASE.
943 *
944 * Without FSGSBASE the kernel enforces that negative GSBASE
945 * values indicate kernel GSBASE. With FSGSBASE no assumptions
946 * can be made about the GSBASE value when entering from user
947 * space.
948 */
949 ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
950
951 /*
952 * Read the current GSBASE and store it in %rbx unconditionally,
953 * retrieve and set the current CPU's kernel GSBASE. The stored value
954 * has to be restored in paranoid_exit unconditionally.
955 *
956 * The unconditional write to GS base below ensures that no subsequent
957 * loads based on a mispredicted GS base can happen, therefore no LFENCE
958 * is needed here.
959 */
960 SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
961 jmp .Lparanoid_gsbase_done
962
963 .Lparanoid_entry_checkgs:
964 /* EBX = 1 -> kernel GSBASE active, no restore required */
965 movl $1, %ebx
966
967 /*
968 * The kernel-enforced convention is a negative GSBASE indicates
969 * a kernel value. No SWAPGS needed on entry and exit.
970 */
971 movl $MSR_GS_BASE, %ecx
972 rdmsr
973 testl %edx, %edx
974 js .Lparanoid_kernel_gsbase
975
976 /* EBX = 0 -> SWAPGS required on exit */
977 xorl %ebx, %ebx
978 swapgs
979 .Lparanoid_kernel_gsbase:
980 FENCE_SWAPGS_KERNEL_ENTRY
981 .Lparanoid_gsbase_done:
982
983 /*
984 * Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
985 * CR3 above, keep the old value in a callee saved register.
986 */
987 IBRS_ENTER save_reg=%r15
988 UNTRAIN_RET_FROM_CALL
989
990 RET
991 SYM_CODE_END(paranoid_entry)
992
993 /*
994 * "Paranoid" exit path from exception stack. This is invoked
995 * only on return from non-NMI IST interrupts that came
996 * from kernel space.
997 *
998 * We may be returning to very strange contexts (e.g. very early
999 * in syscall entry), so checking for preemption here would
1000 * be complicated. Fortunately, there's no good reason to try
1001 * to handle preemption here.
1002 *
1003 * R/EBX contains the GSBASE related information depending on the
1004 * availability of the FSGSBASE instructions:
1005 *
1006 * FSGSBASE R/EBX
1007 * N 0 -> SWAPGS on exit
1008 * 1 -> no SWAPGS on exit
1009 *
1010 * Y User space GSBASE, must be restored unconditionally
1011 *
1012 * R14 - old CR3
1013 * R15 - old SPEC_CTRL
1014 */
1015 SYM_CODE_START_LOCAL(paranoid_exit)
1016 UNWIND_HINT_REGS
1017
1018 /*
1019 * Must restore IBRS state before both CR3 and %GS since we need access
1020 * to the per-CPU x86_spec_ctrl_shadow variable.
1021 */
1022 IBRS_EXIT save_reg=%r15
1023
1024 /*
1025 * The order of operations is important. RESTORE_CR3 requires
1026 * kernel GSBASE.
1027 *
1028 * NB to anyone trying to optimize this code: this code does
1029 * not execute at all for exceptions from user mode. Those
1030 * exceptions go through error_return instead.
1031 */
1032 RESTORE_CR3 scratch_reg=%rax save_reg=%r14
1033
1034 /* Handle the three GSBASE cases */
1035 ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
1036
1037 /* With FSGSBASE enabled, unconditionally restore GSBASE */
1038 wrgsbase %rbx
1039 jmp restore_regs_and_return_to_kernel
1040
1041 .Lparanoid_exit_checkgs:
1042 /* On non-FSGSBASE systems, conditionally do SWAPGS */
1043 testl %ebx, %ebx
1044 jnz restore_regs_and_return_to_kernel
1045
1046 /* We are returning to a context with user GSBASE */
1047 swapgs
1048 jmp restore_regs_and_return_to_kernel
1049 SYM_CODE_END(paranoid_exit)
1050
1051 /*
1052 * Switch GS and CR3 if needed.
1053 */
1054 SYM_CODE_START(error_entry)
1055 ANNOTATE_NOENDBR
1056 UNWIND_HINT_FUNC
1057
1058 PUSH_AND_CLEAR_REGS save_ret=1
1059 ENCODE_FRAME_POINTER 8
1060
1061 testb $3, CS+8(%rsp)
1062 jz .Lerror_kernelspace
1063
1064 /*
1065 * We entered from user mode or we're pretending to have entered
1066 * from user mode due to an IRET fault.
1067 */
1068 swapgs
1069 FENCE_SWAPGS_USER_ENTRY
1070 /* We have user CR3. Change to kernel CR3. */
1071 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1072 IBRS_ENTER
1073 UNTRAIN_RET_FROM_CALL
1074
1075 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1076 /* Put us onto the real thread stack. */
1077 jmp sync_regs
1078
1079 /*
1080 * There are two places in the kernel that can potentially fault with
1081 * usergs. Handle them here. B stepping K8s sometimes report a
1082 * truncated RIP for IRET exceptions returning to compat mode. Check
1083 * for these here too.
1084 */
1085 .Lerror_kernelspace:
1086 leaq native_irq_return_iret(%rip), %rcx
1087 cmpq %rcx, RIP+8(%rsp)
1088 je .Lerror_bad_iret
1089 movl %ecx, %eax /* zero extend */
1090 cmpq %rax, RIP+8(%rsp)
1091 je .Lbstep_iret
1092 cmpq $.Lgs_change, RIP+8(%rsp)
1093 jne .Lerror_entry_done_lfence
1094
1095 /*
1096 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1097 * gsbase and proceed. We'll fix up the exception and land in
1098 * .Lgs_change's error handler with kernel gsbase.
1099 */
1100 swapgs
1101
1102 /*
1103 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1104 * kernel or user gsbase.
1105 */
1106 .Lerror_entry_done_lfence:
1107 FENCE_SWAPGS_KERNEL_ENTRY
1108 CALL_DEPTH_ACCOUNT
1109 leaq 8(%rsp), %rax /* return pt_regs pointer */
1110 VALIDATE_UNRET_END
1111 RET
1112
1113 .Lbstep_iret:
1114 /* Fix truncated RIP */
1115 movq %rcx, RIP+8(%rsp)
1116 /* fall through */
1117
1118 .Lerror_bad_iret:
1119 /*
1120 * We came from an IRET to user mode, so we have user
1121 * gsbase and CR3. Switch to kernel gsbase and CR3:
1122 */
1123 swapgs
1124 FENCE_SWAPGS_USER_ENTRY
1125 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1126 IBRS_ENTER
1127 UNTRAIN_RET_FROM_CALL
1128
1129 /*
1130 * Pretend that the exception came from user mode: set up pt_regs
1131 * as if we faulted immediately after IRET.
1132 */
1133 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1134 call fixup_bad_iret
1135 mov %rax, %rdi
1136 jmp sync_regs
1137 SYM_CODE_END(error_entry)
1138
1139 SYM_CODE_START_LOCAL(error_return)
1140 UNWIND_HINT_REGS
1141 DEBUG_ENTRY_ASSERT_IRQS_OFF
1142 testb $3, CS(%rsp)
1143 jz restore_regs_and_return_to_kernel
1144 jmp swapgs_restore_regs_and_return_to_usermode
1145 SYM_CODE_END(error_return)
1146
1147 /*
1148 * Runs on exception stack. Xen PV does not go through this path at all,
1149 * so we can use real assembly here.
1150 *
1151 * Registers:
1152 * %r14: Used to save/restore the CR3 of the interrupted context
1153 * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1154 */
1155 SYM_CODE_START(asm_exc_nmi)
1156 UNWIND_HINT_IRET_ENTRY
1157 ENDBR
1158
1159 /*
1160 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1161 * the iretq it performs will take us out of NMI context.
1162 * This means that we can have nested NMIs where the next
1163 * NMI is using the top of the stack of the previous NMI. We
1164 * can't let it execute because the nested NMI will corrupt the
1165 * stack of the previous NMI. NMI handlers are not re-entrant
1166 * anyway.
1167 *
1168 * To handle this case we do the following:
1169 * Check a special location on the stack that contains
1170 * a variable that is set when NMIs are executing.
1171 * The interrupted task's stack is also checked to see if it
1172 * is an NMI stack.
1173 * If the variable is not set and the stack is not the NMI
1174 * stack then:
1175 * o Set the special variable on the stack
1176 * o Copy the interrupt frame into an "outermost" location on the
1177 * stack
1178 * o Copy the interrupt frame into an "iret" location on the stack
1179 * o Continue processing the NMI
1180 * If the variable is set or the previous stack is the NMI stack:
1181 * o Modify the "iret" location to jump to the repeat_nmi
1182 * o return back to the first NMI
1183 *
1184 * Now on exit of the first NMI, we first clear the stack variable.
1185 * The NMI stack will tell any nested NMIs at that point that it is
1186 * nested. Then we pop the stack normally with iret, and if there was
1187 * a nested NMI that updated the copy interrupt stack frame, a
1188 * jump will be made to the repeat_nmi code that will handle the second
1189 * NMI.
1190 *
1191 * However, espfix prevents us from directly returning to userspace
1192 * with a single IRET instruction. Similarly, IRET to user mode
1193 * can fault. We therefore handle NMIs from user space like
1194 * other IST entries.
1195 */
1196
1197 ASM_CLAC
1198 cld
1199
1200 /* Use %rdx as our temp variable throughout */
1201 pushq %rdx
1202
1203 testb $3, CS-RIP+8(%rsp)
1204 jz .Lnmi_from_kernel
1205
1206 /*
1207 * NMI from user mode. We need to run on the thread stack, but we
1208 * can't go through the normal entry paths: NMIs are masked, and
1209 * we don't want to enable interrupts, because then we'll end
1210 * up in an awkward situation in which IRQs are on but NMIs
1211 * are off.
1212 *
1213 * We also must not push anything to the stack before switching
1214 * stacks lest we corrupt the "NMI executing" variable.
1215 */
1216
1217 swapgs
1218 FENCE_SWAPGS_USER_ENTRY
1219 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1220 movq %rsp, %rdx
1221 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
1222 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1223 pushq 5*8(%rdx) /* pt_regs->ss */
1224 pushq 4*8(%rdx) /* pt_regs->rsp */
1225 pushq 3*8(%rdx) /* pt_regs->flags */
1226 pushq 2*8(%rdx) /* pt_regs->cs */
1227 pushq 1*8(%rdx) /* pt_regs->rip */
1228 UNWIND_HINT_IRET_REGS
1229 pushq $-1 /* pt_regs->orig_ax */
1230 PUSH_AND_CLEAR_REGS rdx=(%rdx)
1231 ENCODE_FRAME_POINTER
1232
1233 IBRS_ENTER
1234 UNTRAIN_RET
1235
1236 /*
1237 * At this point we no longer need to worry about stack damage
1238 * due to nesting -- we're on the normal thread stack and we're
1239 * done with the NMI stack.
1240 */
1241
1242 movq %rsp, %rdi
1243 movq $-1, %rsi
1244 call exc_nmi
1245
1246 /*
1247 * Return back to user mode. We must *not* do the normal exit
1248 * work, because we don't want to enable interrupts.
1249 */
1250 jmp swapgs_restore_regs_and_return_to_usermode
1251
1252 .Lnmi_from_kernel:
1253 /*
1254 * Here's what our stack frame will look like:
1255 * +---------------------------------------------------------+
1256 * | original SS |
1257 * | original Return RSP |
1258 * | original RFLAGS |
1259 * | original CS |
1260 * | original RIP |
1261 * +---------------------------------------------------------+
1262 * | temp storage for rdx |
1263 * +---------------------------------------------------------+
1264 * | "NMI executing" variable |
1265 * +---------------------------------------------------------+
1266 * | iret SS } Copied from "outermost" frame |
1267 * | iret Return RSP } on each loop iteration; overwritten |
1268 * | iret RFLAGS } by a nested NMI to force another |
1269 * | iret CS } iteration if needed. |
1270 * | iret RIP } |
1271 * +---------------------------------------------------------+
1272 * | outermost SS } initialized in first_nmi; |
1273 * | outermost Return RSP } will not be changed before |
1274 * | outermost RFLAGS } NMI processing is done. |
1275 * | outermost CS } Copied to "iret" frame on each |
1276 * | outermost RIP } iteration. |
1277 * +---------------------------------------------------------+
1278 * | pt_regs |
1279 * +---------------------------------------------------------+
1280 *
1281 * The "original" frame is used by hardware. Before re-enabling
1282 * NMIs, we need to be done with it, and we need to leave enough
1283 * space for the asm code here.
1284 *
1285 * We return by executing IRET while RSP points to the "iret" frame.
1286 * That will either return for real or it will loop back into NMI
1287 * processing.
1288 *
1289 * The "outermost" frame is copied to the "iret" frame on each
1290 * iteration of the loop, so each iteration starts with the "iret"
1291 * frame pointing to the final return target.
1292 */
1293
1294 /*
1295 * Determine whether we're a nested NMI.
1296 *
1297 * If we interrupted kernel code between repeat_nmi and
1298 * end_repeat_nmi, then we are a nested NMI. We must not
1299 * modify the "iret" frame because it's being written by
1300 * the outer NMI. That's okay; the outer NMI handler is
1301 * about to call exc_nmi() anyway, so we can just
1302 * resume the outer NMI.
1303 */
1304
1305 movq $repeat_nmi, %rdx
1306 cmpq 8(%rsp), %rdx
1307 ja 1f
1308 movq $end_repeat_nmi, %rdx
1309 cmpq 8(%rsp), %rdx
1310 ja nested_nmi_out
1311 1:
1312
1313 /*
1314 * Now check "NMI executing". If it's set, then we're nested.
1315 * This will not detect if we interrupted an outer NMI just
1316 * before IRET.
1317 */
1318 cmpl $1, -8(%rsp)
1319 je nested_nmi
1320
1321 /*
1322 * Now test if the previous stack was an NMI stack. This covers
1323 * the case where we interrupt an outer NMI after it clears
1324 * "NMI executing" but before IRET. We need to be careful, though:
1325 * there is one case in which RSP could point to the NMI stack
1326 * despite there being no NMI active: naughty userspace controls
1327 * RSP at the very beginning of the SYSCALL targets. We can
1328 * pull a fast one on naughty userspace, though: we program
1329 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1330 * if it controls the kernel's RSP. We set DF before we clear
1331 * "NMI executing".
1332 */
1333 lea 6*8(%rsp), %rdx
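/*
 * 6*8 accounts for the saved %rdx plus the 5-word hardware iret frame,
 * so %rdx now holds the RSP value at NMI delivery, i.e. the top of the
 * NMI IST stack.
 */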
1334 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1335 cmpq %rdx, 4*8(%rsp)
1336 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1337 ja first_nmi
1338
1339 subq $EXCEPTION_STKSZ, %rdx
1340 cmpq %rdx, 4*8(%rsp)
1341 /* If it is below the NMI stack, it is a normal NMI */
1342 jb first_nmi
1343
1344 /* Ah, it is within the NMI stack. */
1345
1346 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1347 jz first_nmi /* RSP was user controlled. */
1348
1349 /* This is a nested NMI. */
1350
1351 nested_nmi:
1352 /*
1353 * Modify the "iret" frame to point to repeat_nmi, forcing another
1354 * iteration of NMI handling.
1355 */
1356 subq $8, %rsp
1357 leaq -10*8(%rsp), %rdx
1358 pushq $__KERNEL_DS
1359 pushq %rdx
1360 pushfq
1361 pushq $__KERNEL_CS
1362 pushq $repeat_nmi
1363
1364 /* Put stack back */
1365 addq $(6*8), %rsp
1366
1367 nested_nmi_out:
1368 popq %rdx
1369
1370 /* We are returning to kernel mode, so this cannot result in a fault. */
1371 iretq
1372
1373 first_nmi:
1374 /* Restore rdx. */
1375 movq (%rsp), %rdx
1376
1377 /* Make room for "NMI executing". */
1378 pushq $0
1379
1380 /* Leave room for the "iret" frame */
1381 subq $(5*8), %rsp
1382
1383 /* Copy the "original" frame to the "outermost" frame */
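/*
 * 11*8(%rsp) initially points at the original SS: 5 words of "iret" gap,
 * "NMI executing" and the rdx slot lie in between.  As each pushq moves
 * RSP down one word, the same offset tracks the next original word
 * (RSP, RFLAGS, CS, RIP in turn).
 */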
1384 .rept 5
1385 pushq 11*8(%rsp)
1386 .endr
1387 UNWIND_HINT_IRET_REGS
1388
1389 /* Everything up to here is safe from nested NMIs */
1390
1391 #ifdef CONFIG_DEBUG_ENTRY
1392 /*
1393 * For ease of testing, unmask NMIs right away. Disabled by
1394 * default because IRET is very expensive.
1395 */
1396 pushq $0 /* SS */
1397 pushq %rsp /* RSP (minus 8 because of the previous push) */
1398 addq $8, (%rsp) /* Fix up RSP */
1399 pushfq /* RFLAGS */
1400 pushq $__KERNEL_CS /* CS */
1401 pushq $1f /* RIP */
1402 iretq /* continues at repeat_nmi below */
1403 UNWIND_HINT_IRET_REGS
1404 1:
1405 #endif
1406
1407 repeat_nmi:
1408 ANNOTATE_NOENDBR // this code
1409 /*
1410 * If there was a nested NMI, the first NMI's iret will return
1411 * here. But NMIs are still enabled and we can take another
1412 * nested NMI. The nested NMI checks the interrupted RIP to see
1413 * if it is between repeat_nmi and end_repeat_nmi, and if so
1414 * it will just return, as we are about to repeat an NMI anyway.
1415 * This makes it safe to copy to the stack frame that a nested
1416 * NMI will update.
1417 *
1418 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1419 * we're repeating an NMI, gsbase has the same value that it had on
1420 * the first iteration. paranoid_entry will load the kernel
1421 * gsbase if needed before we call exc_nmi(). "NMI executing"
1422 * is zero.
1423 */
1424 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1425
1426 /*
1427 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1428 * here must not modify the "iret" frame while we're writing to
1429 * it or it will end up containing garbage.
1430 */
1431 addq $(10*8), %rsp
1432 .rept 5
1433 pushq -6*8(%rsp)
1434 .endr
1435 subq $(5*8), %rsp
1436 end_repeat_nmi:
1437 ANNOTATE_NOENDBR // this code
1438
1439 /*
1440 * Everything below this point can be preempted by a nested NMI.
1441 * If this happens, then the inner NMI will change the "iret"
1442 * frame to point back to repeat_nmi.
1443 */
1444 pushq $-1 /* ORIG_RAX: no syscall to restart */
1445
1446 /*
1447 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1448 * as we should not be calling schedule in NMI context.
1449 * Even with normal interrupts enabled. An NMI should not be
1450 * setting NEED_RESCHED or anything that normal interrupts and
1451 * exceptions might do.
1452 */
1453 call paranoid_entry
1454 UNWIND_HINT_REGS
1455
1456 movq %rsp, %rdi
1457 movq $-1, %rsi
1458 call exc_nmi
1459
1460 /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
1461 IBRS_EXIT save_reg=%r15
1462
1463 /* Always restore stashed CR3 value (see paranoid_entry) */
1464 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1465
1466 /*
1467 * The above invocation of paranoid_entry stored the GSBASE
1468 * related information in R/EBX depending on the availability
1469 * of FSGSBASE.
1470 *
1471 * If FSGSBASE is enabled, restore the saved GSBASE value
1472 * unconditionally, otherwise take the conditional SWAPGS path.
1473 */
1474 ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1475
1476 wrgsbase %rbx
1477 jmp nmi_restore
1478
1479 nmi_no_fsgsbase:
1480 /* EBX == 0 -> invoke SWAPGS */
1481 testl %ebx, %ebx
1482 jnz nmi_restore
1483
1484 nmi_swapgs:
1485 swapgs
1486
1487 nmi_restore:
1488 POP_REGS
1489
1490 /*
1491 * Skip orig_ax and the "outermost" frame to point RSP at the
1492 * "iret" frame.
1493 */
1494 addq $6*8, %rsp
1495
1496 /*
1497 * Clear "NMI executing". Set DF first so that we can easily
1498 * distinguish the remaining code between here and IRET from
1499 * the SYSCALL entry and exit paths.
1500 *
1501 * We arguably should just inspect RIP instead, but I (Andy) wrote
1502 * this code when I had the misapprehension that Xen PV supported
1503 * NMIs, and Xen PV would break that approach.
1504 */
1505 std
1506 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1507
1508 /*
1509 * iretq reads the "iret" frame and exits the NMI stack in a
1510 * single instruction. We are returning to kernel mode, so this
1511 * cannot result in a fault. Similarly, we don't need to worry
1512 * about espfix64 on the way back to kernel mode.
1513 */
1514 iretq
1515 SYM_CODE_END(asm_exc_nmi)
1516
1517 #ifndef CONFIG_IA32_EMULATION
1518 /*
1519 * This handles SYSCALL from 32-bit code. There is no way to program
1520 * MSRs to fully disable 32-bit SYSCALL.
1521 */
1522 SYM_CODE_START(ignore_sysret)
1523 UNWIND_HINT_END_OF_STACK
1524 ENDBR
1525 mov $-ENOSYS, %eax
1526 sysretl
1527 SYM_CODE_END(ignore_sysret)
1528 #endif
1529
1530 .pushsection .text, "ax"
1531 __FUNC_ALIGN
1532 SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
1533 UNWIND_HINT_FUNC
1534 /* Prevent any naive code from trying to unwind to our caller. */
1535 xorl %ebp, %ebp
1536
1537 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
1538 leaq -PTREGS_SIZE(%rax), %rsp
1539 UNWIND_HINT_REGS
1540
1541 call make_task_dead
1542 SYM_CODE_END(rewind_stack_and_make_dead)
1543 .popsection