1 /*
2 *
3 * Copyright (C) 1991, 1992 Linus Torvalds
4 */
5
6 /*
7 * entry.S contains the system-call and fault low-level handling routines.
8 * This also contains the timer-interrupt handler, as well as all interrupts
9 * and faults that can result in a task-switch.
10 *
11 * NOTE: This code handles signal-recognition, which happens every time
12 * after a timer-interrupt and after each system call.
13 *
14 * I changed all the .align's to 4 (16 byte alignment), as that's faster
15 * on a 486.
16 *
17 * Stack layout in 'syscall_exit':
18 * ptrace needs to have all regs on the stack.
19 * If the order here is changed, it needs to be
20 * updated in fork.c:copy_process, signal.c:do_signal,
21 * ptrace.c and ptrace.h
22 *
23 * 0(%esp) - %ebx
24 * 4(%esp) - %ecx
25 * 8(%esp) - %edx
26 * C(%esp) - %esi
27 * 10(%esp) - %edi
28 * 14(%esp) - %ebp
29 * 18(%esp) - %eax
30 * 1C(%esp) - %ds
31 * 20(%esp) - %es
32 * 24(%esp) - %fs
33 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
34 * 2C(%esp) - orig_eax
35 * 30(%esp) - %eip
36 * 34(%esp) - %cs
37 * 38(%esp) - %eflags
38 * 3C(%esp) - %oldesp
39 * 40(%esp) - %oldss
40 *
41 * "current" is in register %ebx during any slow entries.
42 */
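/*
 * For reference, the layout above mirrors the 32-bit struct pt_regs
 * (arch/x86/include/asm/ptrace.h), roughly:
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned long ds, es, fs, gs;
 *		unsigned long orig_ax;
 *		unsigned long ip, cs, flags, sp, ss;
 *	};
 *
 * The PT_* offsets used throughout this file (PT_EBX, PT_EIP, ...) are
 * generated from that structure by asm-offsets.
 */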
43
44 #include <linux/linkage.h>
45 #include <linux/err.h>
46 #include <asm/thread_info.h>
47 #include <asm/irqflags.h>
48 #include <asm/errno.h>
49 #include <asm/segment.h>
50 #include <asm/smp.h>
51 #include <asm/page_types.h>
52 #include <asm/percpu.h>
53 #include <asm/dwarf2.h>
54 #include <asm/processor-flags.h>
55 #include <asm/ftrace.h>
56 #include <asm/irq_vectors.h>
57 #include <asm/cpufeature.h>
58 #include <asm/alternative-asm.h>
59 #include <asm/asm.h>
60 #include <asm/smap.h>
61
62 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
63 #include <linux/elf-em.h>
64 #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
65 #define __AUDIT_ARCH_LE 0x40000000
66
67 #ifndef CONFIG_AUDITSYSCALL
68 #define sysenter_audit syscall_trace_entry
69 #define sysexit_audit syscall_exit_work
70 #endif
71
72 .section .entry.text, "ax"
73
74 /*
75 * We use macros for low-level operations which need to be overridden
76 * for paravirtualization. The following will never clobber any registers:
77 * INTERRUPT_RETURN (aka. "iret")
78 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
79 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
80 *
81 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
82 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
83 * Allowing a register to be clobbered can shrink the paravirt replacement
84 * enough to patch inline, increasing performance.
85 */
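/*
 * As a rough sketch of what these macros become: in a native
 * (!CONFIG_PARAVIRT) build they are plain instructions, e.g.
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)	->	cli
 *	ENABLE_INTERRUPTS(CLBR_ANY)	->	sti
 *	INTERRUPT_RETURN		->	iret
 *
 * whereas with CONFIG_PARAVIRT each site becomes an indirect call
 * through pv_irq_ops/pv_cpu_ops that may later be patched inline with
 * the native instruction when running on bare metal.
 */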
86
87 #ifdef CONFIG_PREEMPT
88 #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
89 #else
90 #define preempt_stop(clobbers)
91 #define resume_kernel restore_all
92 #endif
93
94 .macro TRACE_IRQS_IRET
95 #ifdef CONFIG_TRACE_IRQFLAGS
96 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
97 jz 1f
98 TRACE_IRQS_ON
99 1:
100 #endif
101 .endm
102
103 /*
104 * User gs save/restore
105 *
106 * %gs is used for userland TLS; the kernel only uses it for the stack
107 * canary, which gcc requires to be at %gs:20. Read the comment
108 * at the top of stackprotector.h for more info.
109 *
110 * Local labels 98 and 99 are used.
111 */
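/*
 * For illustration: with CONFIG_CC_STACKPROTECTOR, gcc emits canary
 * accesses roughly like
 *
 *	movl %gs:20, %eax	# load the canary in the prologue
 *	...
 *	xorl %gs:20, %eax	# check it in the epilogue
 *
 * so the kernel %gs base must point at a per-cpu area whose offset 20
 * holds the canary (see __KERNEL_STACK_CANARY in SET_KERNEL_GS below).
 */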
112 #ifdef CONFIG_X86_32_LAZY_GS
113
114 /* unfortunately push/pop can't be a no-op */
115 .macro PUSH_GS
116 pushl_cfi $0
117 .endm
118 .macro POP_GS pop=0
119 addl $(4 + \pop), %esp
120 CFI_ADJUST_CFA_OFFSET -(4 + \pop)
121 .endm
122 .macro POP_GS_EX
123 .endm
124
125 /* all the rest are no-op */
126 .macro PTGS_TO_GS
127 .endm
128 .macro PTGS_TO_GS_EX
129 .endm
130 .macro GS_TO_REG reg
131 .endm
132 .macro REG_TO_PTGS reg
133 .endm
134 .macro SET_KERNEL_GS reg
135 .endm
136
137 #else /* CONFIG_X86_32_LAZY_GS */
138
139 .macro PUSH_GS
140 pushl_cfi %gs
141 /*CFI_REL_OFFSET gs, 0*/
142 .endm
143
144 .macro POP_GS pop=0
145 98: popl_cfi %gs
146 /*CFI_RESTORE gs*/
147 .if \pop <> 0
148 add $\pop, %esp
149 CFI_ADJUST_CFA_OFFSET -\pop
150 .endif
151 .endm
152 .macro POP_GS_EX
153 .pushsection .fixup, "ax"
154 99: movl $0, (%esp)
155 jmp 98b
156 .popsection
157 _ASM_EXTABLE(98b,99b)
158 .endm
159
160 .macro PTGS_TO_GS
161 98: mov PT_GS(%esp), %gs
162 .endm
163 .macro PTGS_TO_GS_EX
164 .pushsection .fixup, "ax"
165 99: movl $0, PT_GS(%esp)
166 jmp 98b
167 .popsection
168 _ASM_EXTABLE(98b,99b)
169 .endm
170
171 .macro GS_TO_REG reg
172 movl %gs, \reg
173 /*CFI_REGISTER gs, \reg*/
174 .endm
175 .macro REG_TO_PTGS reg
176 movl \reg, PT_GS(%esp)
177 /*CFI_REL_OFFSET gs, PT_GS*/
178 .endm
179 .macro SET_KERNEL_GS reg
180
181 #ifdef CONFIG_CC_STACKPROTECTOR
182 movl $(__KERNEL_STACK_CANARY), \reg
183 #elif defined(CONFIG_PAX_MEMORY_UDEREF)
184 movl $(__USER_DS), \reg
185 #else
186 xorl \reg, \reg
187 #endif
188
189 movl \reg, %gs
190 .endm
191
192 #endif /* CONFIG_X86_32_LAZY_GS */
193
194 .macro pax_enter_kernel
195 #ifdef CONFIG_PAX_KERNEXEC
196 call pax_enter_kernel
197 #endif
198 .endm
199
200 .macro pax_exit_kernel
201 #ifdef CONFIG_PAX_KERNEXEC
202 call pax_exit_kernel
203 #endif
204 .endm
205
206 #ifdef CONFIG_PAX_KERNEXEC
207 ENTRY(pax_enter_kernel)
208 #ifdef CONFIG_PARAVIRT
209 pushl %eax
210 pushl %ecx
211 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
212 mov %eax, %esi
213 #else
214 mov %cr0, %esi
215 #endif
216 bts $16, %esi
217 jnc 1f
218 mov %cs, %esi
219 cmp $__KERNEL_CS, %esi
220 jz 3f
221 ljmp $__KERNEL_CS, $3f
222 1: ljmp $__KERNEXEC_KERNEL_CS, $2f
223 2:
224 #ifdef CONFIG_PARAVIRT
225 mov %esi, %eax
226 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
227 #else
228 mov %esi, %cr0
229 #endif
230 3:
231 #ifdef CONFIG_PARAVIRT
232 popl %ecx
233 popl %eax
234 #endif
235 ret
236 ENDPROC(pax_enter_kernel)
237
238 ENTRY(pax_exit_kernel)
239 #ifdef CONFIG_PARAVIRT
240 pushl %eax
241 pushl %ecx
242 #endif
243 mov %cs, %esi
244 cmp $__KERNEXEC_KERNEL_CS, %esi
245 jnz 2f
246 #ifdef CONFIG_PARAVIRT
247 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
248 mov %eax, %esi
249 #else
250 mov %cr0, %esi
251 #endif
252 btr $16, %esi
253 ljmp $__KERNEL_CS, $1f
254 1:
255 #ifdef CONFIG_PARAVIRT
256 mov %esi, %eax
257 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
258 #else
259 mov %esi, %cr0
260 #endif
261 2:
262 #ifdef CONFIG_PARAVIRT
263 popl %ecx
264 popl %eax
265 #endif
266 ret
267 ENDPROC(pax_exit_kernel)
268 #endif
269
270 .macro pax_erase_kstack
271 #ifdef CONFIG_PAX_MEMORY_STACKLEAK
272 call pax_erase_kstack
273 #endif
274 .endm
275
276 #ifdef CONFIG_PAX_MEMORY_STACKLEAK
277 /*
278 * ebp: thread_info
279 */
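/*
 * A descriptive sketch of the routine below: starting at
 * thread_info->lowest_stack it scans the kernel stack downwards for a
 * long enough run (2*16 dwords) of the poison value $-0xBEEF
 * (0xffff4111), which it treats as the boundary of the untouched part
 * of the stack.  It then refills everything from that boundary up to
 * the current %esp with the poison, so stale data does not linger on
 * the stack, and finally resets lowest_stack to just below the top of
 * the stack (thread.sp0 - 128).
 */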
280 ENTRY(pax_erase_kstack)
281 pushl %edi
282 pushl %ecx
283 pushl %eax
284
285 mov TI_lowest_stack(%ebp), %edi
286 mov $-0xBEEF, %eax
287 std
288
289 1: mov %edi, %ecx
290 and $THREAD_SIZE_asm - 1, %ecx
291 shr $2, %ecx
292 repne scasl
293 jecxz 2f
294
295 cmp $2*16, %ecx
296 jc 2f
297
298 mov $2*16, %ecx
299 repe scasl
300 jecxz 2f
301 jne 1b
302
303 2: cld
304 mov %esp, %ecx
305 sub %edi, %ecx
306
307 cmp $THREAD_SIZE_asm, %ecx
308 jb 3f
309 ud2
310 3:
311
312 shr $2, %ecx
313 rep stosl
314
315 mov TI_task_thread_sp0(%ebp), %edi
316 sub $128, %edi
317 mov %edi, TI_lowest_stack(%ebp)
318
319 popl %eax
320 popl %ecx
321 popl %edi
322 ret
323 ENDPROC(pax_erase_kstack)
324 #endif
325
326 .macro __SAVE_ALL _DS
327 cld
328 PUSH_GS
329 pushl_cfi %fs
330 /*CFI_REL_OFFSET fs, 0;*/
331 pushl_cfi %es
332 /*CFI_REL_OFFSET es, 0;*/
333 pushl_cfi %ds
334 /*CFI_REL_OFFSET ds, 0;*/
335 pushl_cfi %eax
336 CFI_REL_OFFSET eax, 0
337 pushl_cfi %ebp
338 CFI_REL_OFFSET ebp, 0
339 pushl_cfi %edi
340 CFI_REL_OFFSET edi, 0
341 pushl_cfi %esi
342 CFI_REL_OFFSET esi, 0
343 pushl_cfi %edx
344 CFI_REL_OFFSET edx, 0
345 pushl_cfi %ecx
346 CFI_REL_OFFSET ecx, 0
347 pushl_cfi %ebx
348 CFI_REL_OFFSET ebx, 0
349 movl $\_DS, %edx
350 movl %edx, %ds
351 movl %edx, %es
352 movl $(__KERNEL_PERCPU), %edx
353 movl %edx, %fs
354 SET_KERNEL_GS %edx
355 .endm
356
357 .macro SAVE_ALL
358 #if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
359 __SAVE_ALL __KERNEL_DS
360 pax_enter_kernel
361 #else
362 __SAVE_ALL __USER_DS
363 #endif
364 .endm
365
366 .macro RESTORE_INT_REGS
367 popl_cfi %ebx
368 CFI_RESTORE ebx
369 popl_cfi %ecx
370 CFI_RESTORE ecx
371 popl_cfi %edx
372 CFI_RESTORE edx
373 popl_cfi %esi
374 CFI_RESTORE esi
375 popl_cfi %edi
376 CFI_RESTORE edi
377 popl_cfi %ebp
378 CFI_RESTORE ebp
379 popl_cfi %eax
380 CFI_RESTORE eax
381 .endm
382
383 .macro RESTORE_REGS pop=0
384 RESTORE_INT_REGS
385 1: popl_cfi %ds
386 /*CFI_RESTORE ds;*/
387 2: popl_cfi %es
388 /*CFI_RESTORE es;*/
389 3: popl_cfi %fs
390 /*CFI_RESTORE fs;*/
391 POP_GS \pop
392 .pushsection .fixup, "ax"
393 4: movl $0, (%esp)
394 jmp 1b
395 5: movl $0, (%esp)
396 jmp 2b
397 6: movl $0, (%esp)
398 jmp 3b
399 .popsection
400 _ASM_EXTABLE(1b,4b)
401 _ASM_EXTABLE(2b,5b)
402 _ASM_EXTABLE(3b,6b)
403 POP_GS_EX
404 .endm
405
406 .macro RING0_INT_FRAME
407 CFI_STARTPROC simple
408 CFI_SIGNAL_FRAME
409 CFI_DEF_CFA esp, 3*4
410 /*CFI_OFFSET cs, -2*4;*/
411 CFI_OFFSET eip, -3*4
412 .endm
413
414 .macro RING0_EC_FRAME
415 CFI_STARTPROC simple
416 CFI_SIGNAL_FRAME
417 CFI_DEF_CFA esp, 4*4
418 /*CFI_OFFSET cs, -2*4;*/
419 CFI_OFFSET eip, -3*4
420 .endm
421
422 .macro RING0_PTREGS_FRAME
423 CFI_STARTPROC simple
424 CFI_SIGNAL_FRAME
425 CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
426 /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
427 CFI_OFFSET eip, PT_EIP-PT_OLDESP
428 /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
429 /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
430 CFI_OFFSET eax, PT_EAX-PT_OLDESP
431 CFI_OFFSET ebp, PT_EBP-PT_OLDESP
432 CFI_OFFSET edi, PT_EDI-PT_OLDESP
433 CFI_OFFSET esi, PT_ESI-PT_OLDESP
434 CFI_OFFSET edx, PT_EDX-PT_OLDESP
435 CFI_OFFSET ecx, PT_ECX-PT_OLDESP
436 CFI_OFFSET ebx, PT_EBX-PT_OLDESP
437 .endm
438
439 ENTRY(ret_from_fork)
440 CFI_STARTPROC
441 pushl_cfi %eax
442 call schedule_tail
443 GET_THREAD_INFO(%ebp)
444 popl_cfi %eax
445 pushl_cfi $0x0202 # Reset kernel eflags
446 popfl_cfi
447 jmp syscall_exit
448 CFI_ENDPROC
449 ENDPROC(ret_from_fork)
450
451 ENTRY(ret_from_kernel_thread)
452 CFI_STARTPROC
453 pushl_cfi %eax
454 call schedule_tail
455 GET_THREAD_INFO(%ebp)
456 popl_cfi %eax
457 pushl_cfi $0x0202 # Reset kernel eflags
458 popfl_cfi
459 movl PT_EBP(%esp),%eax
460 call *PT_EBX(%esp)
461 movl $0,PT_EAX(%esp)
462 jmp syscall_exit
463 CFI_ENDPROC
464 ENDPROC(ret_from_kernel_thread)
465
466 /*
467 * Interrupt exit functions should be protected against kprobes
468 */
469 .pushsection .kprobes.text, "ax"
470 /*
471 * Return to user mode is not as complex as all this looks,
472 * but we want the default path for a system call return to
473 * go as quickly as possible, which is why some of this is
474 * less clear than it otherwise should be.
475 */
476
477 # userspace resumption stub bypassing syscall exit tracing
478 ALIGN
479 RING0_PTREGS_FRAME
480 ret_from_exception:
481 preempt_stop(CLBR_ANY)
482 ret_from_intr:
483 GET_THREAD_INFO(%ebp)
484 #ifdef CONFIG_VM86
485 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
486 movb PT_CS(%esp), %al
487 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
488 #else
489 /*
490 * We can be coming here from child spawned by kernel_thread().
491 */
492 movl PT_CS(%esp), %eax
493 andl $SEGMENT_RPL_MASK, %eax
494 #endif
495 cmpl $USER_RPL, %eax
496
497 #ifdef CONFIG_PAX_KERNEXEC
498 jae resume_userspace
499
500 pax_exit_kernel
501 jmp resume_kernel
502 #else
503 jb resume_kernel # not returning to v8086 or userspace
504 #endif
505
506 ENTRY(resume_userspace)
507 LOCKDEP_SYS_EXIT
508 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
509 # setting need_resched or sigpending
510 # between sampling and the iret
511 TRACE_IRQS_OFF
512 movl TI_flags(%ebp), %ecx
513 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
514 # int/exception return?
515 jne work_pending
516 jmp restore_all_pax
517 ENDPROC(ret_from_exception)
518
519 #ifdef CONFIG_PREEMPT
520 ENTRY(resume_kernel)
521 DISABLE_INTERRUPTS(CLBR_ANY)
522 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
523 jnz restore_all
524 need_resched:
525 movl TI_flags(%ebp), %ecx # need_resched set ?
526 testb $_TIF_NEED_RESCHED, %cl
527 jz restore_all
528 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
529 jz restore_all
530 call preempt_schedule_irq
531 jmp need_resched
532 ENDPROC(resume_kernel)
533 #endif
534 CFI_ENDPROC
535 /*
536 * End of kprobes section
537 */
538 .popsection
539
540 /* SYSENTER_RETURN points to after the "sysenter" instruction in
541 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
542
543 # sysenter call handler stub
544 ENTRY(ia32_sysenter_target)
545 CFI_STARTPROC simple
546 CFI_SIGNAL_FRAME
547 CFI_DEF_CFA esp, 0
548 CFI_REGISTER esp, ebp
549 movl TSS_sysenter_sp0(%esp),%esp
550 sysenter_past_esp:
551 /*
552 * Interrupts are disabled here, but we can't trace that until we have
553 * set up enough kernel state to call TRACE_IRQS_OFF - and at that
554 * point we immediately enable interrupts again anyway.
555 */
556 pushl_cfi $__USER_DS
557 /*CFI_REL_OFFSET ss, 0*/
558 pushl_cfi %ebp
559 CFI_REL_OFFSET esp, 0
560 pushfl_cfi
561 orl $X86_EFLAGS_IF, (%esp)
562 pushl_cfi $__USER_CS
563 /*CFI_REL_OFFSET cs, 0*/
564 /*
565 * Push current_thread_info()->sysenter_return to the stack.
566 */
567 pushl_cfi $0
568 CFI_REL_OFFSET eip, 0
569
570 pushl_cfi %eax
571 SAVE_ALL
572 GET_THREAD_INFO(%ebp)
573 movl TI_sysenter_return(%ebp),%ebp
574 movl %ebp,PT_EIP(%esp)
575 ENABLE_INTERRUPTS(CLBR_NONE)
576
577 /*
578 * Load the potential sixth argument from user stack.
579 * Careful about security.
580 */
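/*
 * Background for the two variants below: in the sysenter ABI the vdso
 * stub saves the real sixth argument (%ebp) on the user stack and puts
 * the user %esp into %ebp, so the kernel must fetch that argument from
 * user memory at the saved user stack pointer.  With
 * CONFIG_PAX_MEMORY_UDEREF the load goes through the user data segment
 * reloaded from PT_OLDSS, whose limit is meant to exclude kernel
 * space; otherwise the pointer is bounds-checked against __PAGE_OFFSET
 * and wrapped in STAC/CLAC, and the extable entry turns a fault into
 * syscall_fault.
 */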
581 movl PT_OLDESP(%esp),%ebp
582
583 #ifdef CONFIG_PAX_MEMORY_UDEREF
584 mov PT_OLDSS(%esp),%ds
585 1: movl %ds:(%ebp),%ebp
586 push %ss
587 pop %ds
588 #else
589 cmpl $__PAGE_OFFSET-3,%ebp
590 jae syscall_fault
591 ASM_STAC
592 1: movl (%ebp),%ebp
593 ASM_CLAC
594 #endif
595
596 movl %ebp,PT_EBP(%esp)
597 _ASM_EXTABLE(1b,syscall_fault)
598
599 GET_THREAD_INFO(%ebp)
600
601 #ifdef CONFIG_PAX_RANDKSTACK
602 pax_erase_kstack
603 #endif
604
605 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
606 jnz sysenter_audit
607 sysenter_do_call:
608 cmpl $(NR_syscalls), %eax
609 jae sysenter_badsys
610 call *sys_call_table(,%eax,4)
611 movl %eax,PT_EAX(%esp)
612 sysenter_after_call:
613 LOCKDEP_SYS_EXIT
614 DISABLE_INTERRUPTS(CLBR_ANY)
615 TRACE_IRQS_OFF
616 movl TI_flags(%ebp), %ecx
617 testl $_TIF_ALLWORK_MASK, %ecx
618 jne sysexit_audit
619 sysenter_exit:
620
621 #ifdef CONFIG_PAX_RANDKSTACK
622 pushl_cfi %eax
623 movl %esp, %eax
624 call pax_randomize_kstack
625 popl_cfi %eax
626 #endif
627
628 pax_erase_kstack
629
630 /* if something modifies registers it must also disable sysexit */
631 movl PT_EIP(%esp), %edx
632 movl PT_OLDESP(%esp), %ecx
633 xorl %ebp,%ebp
634 TRACE_IRQS_ON
635 1: mov PT_FS(%esp), %fs
636 2: mov PT_DS(%esp), %ds
637 3: mov PT_ES(%esp), %es
638 PTGS_TO_GS
639 ENABLE_INTERRUPTS_SYSEXIT
640
641 #ifdef CONFIG_AUDITSYSCALL
642 sysenter_audit:
643 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
644 jnz syscall_trace_entry
645 addl $4,%esp
646 CFI_ADJUST_CFA_OFFSET -4
647 /* %esi already in 8(%esp) 6th arg: 4th syscall arg */
648 /* %edx already in 4(%esp) 5th arg: 3rd syscall arg */
649 /* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */
650 movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
651 movl %eax,%edx /* 2nd arg: syscall number */
652 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
653 call __audit_syscall_entry
654
655 pax_erase_kstack
656
657 pushl_cfi %ebx
658 movl PT_EAX(%esp),%eax /* reload syscall number */
659 jmp sysenter_do_call
660
661 sysexit_audit:
662 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
663 jne syscall_exit_work
664 TRACE_IRQS_ON
665 ENABLE_INTERRUPTS(CLBR_ANY)
666 movl %eax,%edx /* second arg, syscall return value */
667 cmpl $-MAX_ERRNO,%eax /* is it an error ? */
668 setbe %al /* 1 if so, 0 if not */
669 movzbl %al,%eax /* zero-extend that */
670 call __audit_syscall_exit
671 DISABLE_INTERRUPTS(CLBR_ANY)
672 TRACE_IRQS_OFF
673 movl TI_flags(%ebp), %ecx
674 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
675 jne syscall_exit_work
676 movl PT_EAX(%esp),%eax /* reload syscall return value */
677 jmp sysenter_exit
678 #endif
679
680 CFI_ENDPROC
681 .pushsection .fixup,"ax"
682 4: movl $0,PT_FS(%esp)
683 jmp 1b
684 5: movl $0,PT_DS(%esp)
685 jmp 1b
686 6: movl $0,PT_ES(%esp)
687 jmp 1b
688 .popsection
689 _ASM_EXTABLE(1b,4b)
690 _ASM_EXTABLE(2b,5b)
691 _ASM_EXTABLE(3b,6b)
692 PTGS_TO_GS_EX
693 ENDPROC(ia32_sysenter_target)
694
695 /*
696 * syscall stub including irq exit should be protected against kprobes
697 */
698 .pushsection .kprobes.text, "ax"
699 # system call handler stub
700 ENTRY(system_call)
701 RING0_INT_FRAME # can't unwind into user space anyway
702 ASM_CLAC
703 pushl_cfi %eax # save orig_eax
704 SAVE_ALL
705 GET_THREAD_INFO(%ebp)
706
707 #ifdef CONFIG_PAX_RANDKSTACK
708 pax_erase_kstack
709 #endif
710
711 # system call tracing in operation / emulation
712 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
713 jnz syscall_trace_entry
714 cmpl $(NR_syscalls), %eax
715 jae syscall_badsys
716 syscall_call:
717 call *sys_call_table(,%eax,4)
718 movl %eax,PT_EAX(%esp) # store the return value
719 syscall_exit:
720 LOCKDEP_SYS_EXIT
721 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
722 # setting need_resched or sigpending
723 # between sampling and the iret
724 TRACE_IRQS_OFF
725 movl TI_flags(%ebp), %ecx
726 testl $_TIF_ALLWORK_MASK, %ecx # current->work
727 jne syscall_exit_work
728
729 restore_all_pax:
730
731 #ifdef CONFIG_PAX_RANDKSTACK
732 movl %esp, %eax
733 call pax_randomize_kstack
734 #endif
735
736 pax_erase_kstack
737
738 restore_all:
739 TRACE_IRQS_IRET
740 restore_all_notrace:
741 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
742 # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
743 # are returning to the kernel.
744 # See comments in process.c:copy_thread() for details.
745 movb PT_OLDSS(%esp), %ah
746 movb PT_CS(%esp), %al
747 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
748 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
749 CFI_REMEMBER_STATE
750 je ldt_ss # returning to user-space with LDT SS
751 restore_nocheck:
752 RESTORE_REGS 4 # skip orig_eax/error_code
753 irq_return:
754 INTERRUPT_RETURN
755 .section .fixup,"ax"
756 ENTRY(iret_exc)
757 pushl $0 # no error code
758 pushl $do_iret_error
759 jmp error_code
760 .previous
761 _ASM_EXTABLE(irq_return,iret_exc)
762
763 CFI_RESTORE_STATE
764 ldt_ss:
765 #ifdef CONFIG_PARAVIRT
766 /*
767 * The kernel can't run on a non-flat stack if paravirt mode
768 * is active. Rather than try to fixup the high bits of
769 * ESP, bypass this code entirely. This may break DOSemu
770 * and/or Wine support in a paravirt VM, although the option
771 * is still available to implement the setting of the high
772 * 16-bits in the INTERRUPT_RETURN paravirt-op.
773 */
774 cmpl $0, pv_info+PARAVIRT_enabled
775 jne restore_nocheck
776 #endif
777
778 /*
779 * Setup and switch to ESPFIX stack
780 *
781 * We're returning to userspace with a 16 bit stack. The CPU will not
782 * restore the high word of ESP for us on executing iret... This is an
783 * "official" bug of all the x86-compatible CPUs, which we can work
784 * around to make dosemu and wine happy. We do this by preloading the
785 * high word of ESP with the high word of the userspace ESP while
786 * compensating for the offset by changing to the ESPFIX segment with
787 * a base address that makes up for the difference.
788 */
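/*
 * A worked example with illustrative numbers: with a kernel %esp of
 * 0xf60a3f54 and a user %esp of 0x0089ffe0, the code below builds
 * %eax = 0x00893f54 (user high word, kernel low word) and patches
 * bits 16..31 of the ESPFIX descriptor base to 0xf60a - 0x0089 =
 * 0xf581.  Segment base + new ESP still addresses the same kernel
 * stack bytes, while the high word that iret leaves behind in ESP is
 * now the user's own 0x0089 instead of leaked kernel bits.
 */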
789 #define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
790 mov %esp, %edx /* load kernel esp */
791 mov PT_OLDESP(%esp), %eax /* load userspace esp */
792 mov %dx, %ax /* eax: new kernel esp */
793 sub %eax, %edx /* offset (low word is 0) */
794 #ifdef CONFIG_SMP
795 movl PER_CPU_VAR(cpu_number), %ebx
796 shll $PAGE_SHIFT_asm, %ebx
797 addl $cpu_gdt_table, %ebx
798 #else
799 movl $cpu_gdt_table, %ebx
800 #endif
801 shr $16, %edx
802
803 #ifdef CONFIG_PAX_KERNEXEC
804 mov %cr0, %esi
805 btr $16, %esi
806 mov %esi, %cr0
807 #endif
808
809 mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
810 mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
811
812 #ifdef CONFIG_PAX_KERNEXEC
813 bts $16, %esi
814 mov %esi, %cr0
815 #endif
816
817 pushl_cfi $__ESPFIX_SS
818 pushl_cfi %eax /* new kernel esp */
819 /* Disable interrupts, but do not irqtrace this section: we
820 * will soon execute iret and the tracer was already set to
821 * the irqstate after the iret */
822 DISABLE_INTERRUPTS(CLBR_EAX)
823 lss (%esp), %esp /* switch to espfix segment */
824 CFI_ADJUST_CFA_OFFSET -8
825 jmp restore_nocheck
826 CFI_ENDPROC
827 ENDPROC(system_call)
828
829 # perform work that needs to be done immediately before resumption
830 ALIGN
831 RING0_PTREGS_FRAME # can't unwind into user space anyway
832 work_pending:
833 testb $_TIF_NEED_RESCHED, %cl
834 jz work_notifysig
835 work_resched:
836 call schedule
837 LOCKDEP_SYS_EXIT
838 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
839 # setting need_resched or sigpending
840 # between sampling and the iret
841 TRACE_IRQS_OFF
842 movl TI_flags(%ebp), %ecx
843 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
844 # than syscall tracing?
845 jz restore_all_pax
846 testb $_TIF_NEED_RESCHED, %cl
847 jnz work_resched
848
849 work_notifysig: # deal with pending signals and
850 # notify-resume requests
851 movl %esp, %eax
852 #ifdef CONFIG_VM86
853 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
854 jne work_notifysig_v86 # returning to kernel-space or
855 # vm86-space
856 1:
857 #endif
858 TRACE_IRQS_ON
859 ENABLE_INTERRUPTS(CLBR_NONE)
860 movb PT_CS(%esp), %bl
861 andb $SEGMENT_RPL_MASK, %bl
862 cmpb $USER_RPL, %bl
863 jb resume_kernel
864 xorl %edx, %edx
865 call do_notify_resume
866 jmp resume_userspace
867
868 #ifdef CONFIG_VM86
869 ALIGN
870 work_notifysig_v86:
871 pushl_cfi %ecx # save ti_flags for do_notify_resume
872 call save_v86_state # %eax contains pt_regs pointer
873 popl_cfi %ecx
874 movl %eax, %esp
875 jmp 1b
876 #endif
877 ENDPROC(work_pending)
878
879 # perform syscall exit tracing
880 ALIGN
881 syscall_trace_entry:
882 movl $-ENOSYS,PT_EAX(%esp)
883 movl %esp, %eax
884 call syscall_trace_enter
885
886 pax_erase_kstack
887
888 /* What it returned is what we'll actually use. */
889 cmpl $(NR_syscalls), %eax
890 jnae syscall_call
891 jmp syscall_exit
892 ENDPROC(syscall_trace_entry)
893
894 # perform syscall exit tracing
895 ALIGN
896 syscall_exit_work:
897 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
898 jz work_pending
899 TRACE_IRQS_ON
900 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
901 # schedule() instead
902 movl %esp, %eax
903 call syscall_trace_leave
904 jmp resume_userspace
905 ENDPROC(syscall_exit_work)
906 CFI_ENDPROC
907
908 RING0_INT_FRAME # can't unwind into user space anyway
909 syscall_fault:
910 #ifdef CONFIG_PAX_MEMORY_UDEREF
911 push %ss
912 pop %ds
913 #endif
914 ASM_CLAC
915 GET_THREAD_INFO(%ebp)
916 movl $-EFAULT,PT_EAX(%esp)
917 jmp resume_userspace
918 ENDPROC(syscall_fault)
919
920 syscall_badsys:
921 movl $-ENOSYS,PT_EAX(%esp)
922 jmp syscall_exit
923 ENDPROC(syscall_badsys)
924
925 sysenter_badsys:
926 movl $-ENOSYS,PT_EAX(%esp)
927 jmp sysenter_after_call
928 ENDPROC(syscall_badsys)
929 CFI_ENDPROC
930 /*
931 * End of kprobes section
932 */
933 .popsection
934
935 .macro FIXUP_ESPFIX_STACK
936 /*
937 * Switch back from the ESPFIX stack to the normal zero-based stack
938 *
939 * We can't call C functions using the ESPFIX stack. This code reads
940 * the high word of the segment base from the GDT and switches to the
941 * normal stack and adjusts ESP with the matching offset.
942 */
943 /* fixup the stack */
944 #ifdef CONFIG_SMP
945 movl PER_CPU_VAR(cpu_number), %ebx
946 shll $PAGE_SHIFT_asm, %ebx
947 addl $cpu_gdt_table, %ebx
948 #else
949 movl $cpu_gdt_table, %ebx
950 #endif
951 mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
952 mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
953 shl $16, %eax
954 addl %esp, %eax /* the adjusted stack pointer */
955 pushl_cfi $__KERNEL_DS
956 pushl_cfi %eax
957 lss (%esp), %esp /* switch to the normal stack segment */
958 CFI_ADJUST_CFA_OFFSET -8
959 .endm
960 .macro UNWIND_ESPFIX_STACK
961 movl %ss, %eax
962 /* see if on espfix stack */
963 cmpw $__ESPFIX_SS, %ax
964 jne 27f
965 movl $__KERNEL_DS, %eax
966 movl %eax, %ds
967 movl %eax, %es
968 /* switch to normal stack */
969 FIXUP_ESPFIX_STACK
970 27:
971 .endm
972
973 /*
974 * Build the entry stubs and pointer table with some assembler magic.
975 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
976 * single cache line on all modern x86 implementations.
977 */
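/*
 * Each generated stub is, roughly,
 *
 *	pushl $(~vector + 0x80)		# 2-byte push, operand stays in
 *					# signed-byte range
 *	jmp   common_interrupt		# via the 2: label shared by the
 *					# seven stubs of a chunk
 *
 * while the ".long 1b" lines emitted into .init.rodata build the
 * interrupt[] table of stub addresses used to populate the IDT.
 */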
978 .section .init.rodata,"a"
979 ENTRY(interrupt)
980 .section .entry.text, "ax"
981 .p2align 5
982 .p2align CONFIG_X86_L1_CACHE_SHIFT
983 ENTRY(irq_entries_start)
984 RING0_INT_FRAME
985 vector=FIRST_EXTERNAL_VECTOR
986 .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
987 .balign 32
988 .rept 7
989 .if vector < NR_VECTORS
990 .if vector <> FIRST_EXTERNAL_VECTOR
991 CFI_ADJUST_CFA_OFFSET -4
992 .endif
993 1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
994 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
995 jmp 2f
996 .endif
997 .previous
998 .long 1b
999 .section .entry.text, "ax"
1000 vector=vector+1
1001 .endif
1002 .endr
1003 2: jmp common_interrupt
1004 .endr
1005 ENDPROC(irq_entries_start)
1006
1007 .previous
1008 END(interrupt)
1009 .previous
1010
1011 /*
1012 * the CPU automatically disables interrupts when executing an IRQ vector,
1013 * so IRQ-flags tracing has to follow that:
1014 */
1015 .p2align CONFIG_X86_L1_CACHE_SHIFT
1016 common_interrupt:
1017 ASM_CLAC
1018 addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
1019 SAVE_ALL
1020 TRACE_IRQS_OFF
1021 movl %esp,%eax
1022 call do_IRQ
1023 jmp ret_from_intr
1024 ENDPROC(common_interrupt)
1025 CFI_ENDPROC
1026
1027 /*
1028 * Irq entries should be protected against kprobes
1029 */
1030 .pushsection .kprobes.text, "ax"
1031 #define BUILD_INTERRUPT3(name, nr, fn) \
1032 ENTRY(name) \
1033 RING0_INT_FRAME; \
1034 ASM_CLAC; \
1035 pushl_cfi $~(nr); \
1036 SAVE_ALL; \
1037 TRACE_IRQS_OFF \
1038 movl %esp,%eax; \
1039 call fn; \
1040 jmp ret_from_intr; \
1041 CFI_ENDPROC; \
1042 ENDPROC(name)
1043
1044 #define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)
1045
1046 /* The include is where all of the SMP etc. interrupts come from */
1047 #include <asm/entry_arch.h>
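/*
 * For instance, on SMP builds entry_arch.h contains
 *
 *	BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR)
 *
 * which expands to an ENTRY(reschedule_interrupt) stub that pushes the
 * inverted vector number, saves all registers and calls
 * smp_reschedule_interrupt() with the pt_regs pointer before taking
 * the common ret_from_intr exit path.
 */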
1048
1049 ENTRY(coprocessor_error)
1050 RING0_INT_FRAME
1051 ASM_CLAC
1052 pushl_cfi $0
1053 pushl_cfi $do_coprocessor_error
1054 jmp error_code
1055 CFI_ENDPROC
1056 ENDPROC(coprocessor_error)
1057
1058 ENTRY(simd_coprocessor_error)
1059 RING0_INT_FRAME
1060 ASM_CLAC
1061 pushl_cfi $0
1062 #ifdef CONFIG_X86_INVD_BUG
1063 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
1064 661: pushl_cfi $do_general_protection
1065 662:
1066 .section .altinstructions,"a"
1067 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
1068 .previous
1069 .section .altinstr_replacement,"a"
1070 663: pushl $do_simd_coprocessor_error
1071 664:
1072 .previous
1073 #else
1074 pushl_cfi $do_simd_coprocessor_error
1075 #endif
1076 jmp error_code
1077 CFI_ENDPROC
1078 ENDPROC(simd_coprocessor_error)
1079
1080 ENTRY(device_not_available)
1081 RING0_INT_FRAME
1082 ASM_CLAC
1083 pushl_cfi $-1 # mark this as an int
1084 pushl_cfi $do_device_not_available
1085 jmp error_code
1086 CFI_ENDPROC
1087 ENDPROC(device_not_available)
1088
1089 #ifdef CONFIG_PARAVIRT
1090 ENTRY(native_iret)
1091 iret
1092 _ASM_EXTABLE(native_iret, iret_exc)
1093 ENDPROC(native_iret)
1094
1095 ENTRY(native_irq_enable_sysexit)
1096 sti
1097 sysexit
1098 ENDPROC(native_irq_enable_sysexit)
1099 #endif
1100
1101 ENTRY(overflow)
1102 RING0_INT_FRAME
1103 ASM_CLAC
1104 pushl_cfi $0
1105 pushl_cfi $do_overflow
1106 jmp error_code
1107 CFI_ENDPROC
1108 ENDPROC(overflow)
1109
1110 ENTRY(bounds)
1111 RING0_INT_FRAME
1112 ASM_CLAC
1113 pushl_cfi $0
1114 pushl_cfi $do_bounds
1115 jmp error_code
1116 CFI_ENDPROC
1117 ENDPROC(bounds)
1118
1119 ENTRY(invalid_op)
1120 RING0_INT_FRAME
1121 ASM_CLAC
1122 pushl_cfi $0
1123 pushl_cfi $do_invalid_op
1124 jmp error_code
1125 CFI_ENDPROC
1126 ENDPROC(invalid_op)
1127
1128 ENTRY(coprocessor_segment_overrun)
1129 RING0_INT_FRAME
1130 ASM_CLAC
1131 pushl_cfi $0
1132 pushl_cfi $do_coprocessor_segment_overrun
1133 jmp error_code
1134 CFI_ENDPROC
1135 ENDPROC(coprocessor_segment_overrun)
1136
1137 ENTRY(invalid_TSS)
1138 RING0_EC_FRAME
1139 ASM_CLAC
1140 pushl_cfi $do_invalid_TSS
1141 jmp error_code
1142 CFI_ENDPROC
1143 ENDPROC(invalid_TSS)
1144
1145 ENTRY(segment_not_present)
1146 RING0_EC_FRAME
1147 ASM_CLAC
1148 pushl_cfi $do_segment_not_present
1149 jmp error_code
1150 CFI_ENDPROC
1151 ENDPROC(segment_not_present)
1152
1153 ENTRY(stack_segment)
1154 RING0_EC_FRAME
1155 ASM_CLAC
1156 pushl_cfi $do_stack_segment
1157 jmp error_code
1158 CFI_ENDPROC
1159 ENDPROC(stack_segment)
1160
1161 ENTRY(alignment_check)
1162 RING0_EC_FRAME
1163 ASM_CLAC
1164 pushl_cfi $do_alignment_check
1165 jmp error_code
1166 CFI_ENDPROC
1167 ENDPROC(alignment_check)
1168
1169 ENTRY(divide_error)
1170 RING0_INT_FRAME
1171 ASM_CLAC
1172 pushl_cfi $0 # no error code
1173 pushl_cfi $do_divide_error
1174 jmp error_code
1175 CFI_ENDPROC
1176 ENDPROC(divide_error)
1177
1178 #ifdef CONFIG_X86_MCE
1179 ENTRY(machine_check)
1180 RING0_INT_FRAME
1181 ASM_CLAC
1182 pushl_cfi $0
1183 pushl_cfi machine_check_vector
1184 jmp error_code
1185 CFI_ENDPROC
1186 ENDPROC(machine_check)
1187 #endif
1188
1189 ENTRY(spurious_interrupt_bug)
1190 RING0_INT_FRAME
1191 ASM_CLAC
1192 pushl_cfi $0
1193 pushl_cfi $do_spurious_interrupt_bug
1194 jmp error_code
1195 CFI_ENDPROC
1196 ENDPROC(spurious_interrupt_bug)
1197 /*
1198 * End of kprobes section
1199 */
1200 .popsection
1201
1202 #ifdef CONFIG_XEN
1203 /* Xen doesn't set %esp to be precisely what the normal sysenter
1204 entrypoint expects, so fix it up before using the normal path. */
1205 ENTRY(xen_sysenter_target)
1206 RING0_INT_FRAME
1207 addl $5*4, %esp /* remove xen-provided frame */
1208 CFI_ADJUST_CFA_OFFSET -5*4
1209 jmp sysenter_past_esp
1210 CFI_ENDPROC
1211
1212 ENTRY(xen_hypervisor_callback)
1213 CFI_STARTPROC
1214 pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1215 SAVE_ALL
1216 TRACE_IRQS_OFF
1217
1218 /* Check to see if we got the event in the critical
1219 region in xen_iret_direct, after we've reenabled
1220 events and checked for pending events. This simulates
1221 the iret instruction's behaviour, where it delivers a
1222 pending interrupt when enabling interrupts. */
1223 movl PT_EIP(%esp),%eax
1224 cmpl $xen_iret_start_crit,%eax
1225 jb 1f
1226 cmpl $xen_iret_end_crit,%eax
1227 jae 1f
1228
1229 jmp xen_iret_crit_fixup
1230
1231 ENTRY(xen_do_upcall)
1232 1: mov %esp, %eax
1233 call xen_evtchn_do_upcall
1234 jmp ret_from_intr
1235 CFI_ENDPROC
1236 ENDPROC(xen_hypervisor_callback)
1237
1238 # Hypervisor uses this for application faults while it executes.
1239 # We get here for two reasons:
1240 # 1. Fault while reloading DS, ES, FS or GS
1241 # 2. Fault while executing IRET
1242 # Category 1 we fix up by reattempting the load, and zeroing the segment
1243 # register if the load fails.
1244 # Category 2 we fix up by jumping to do_iret_error. We cannot use the
1245 # normal Linux return path in this case because if we use the IRET hypercall
1246 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1247 # We distinguish between categories by maintaining a status value in EAX.
1248 ENTRY(xen_failsafe_callback)
1249 CFI_STARTPROC
1250 pushl_cfi %eax
1251 movl $1,%eax
1252 1: mov 4(%esp),%ds
1253 2: mov 8(%esp),%es
1254 3: mov 12(%esp),%fs
1255 4: mov 16(%esp),%gs
1256 /* EAX == 0 => Category 1 (Bad segment)
1257 EAX != 0 => Category 2 (Bad IRET) */
1258 testl %eax,%eax
1259 popl_cfi %eax
1260 lea 16(%esp),%esp
1261 CFI_ADJUST_CFA_OFFSET -16
1262 jz 5f
1263 jmp iret_exc
1264 5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */
1265 SAVE_ALL
1266 jmp ret_from_exception
1267 CFI_ENDPROC
1268
1269 .section .fixup,"ax"
1270 6: xorl %eax,%eax
1271 movl %eax,4(%esp)
1272 jmp 1b
1273 7: xorl %eax,%eax
1274 movl %eax,8(%esp)
1275 jmp 2b
1276 8: xorl %eax,%eax
1277 movl %eax,12(%esp)
1278 jmp 3b
1279 9: xorl %eax,%eax
1280 movl %eax,16(%esp)
1281 jmp 4b
1282 .previous
1283 _ASM_EXTABLE(1b,6b)
1284 _ASM_EXTABLE(2b,7b)
1285 _ASM_EXTABLE(3b,8b)
1286 _ASM_EXTABLE(4b,9b)
1287 ENDPROC(xen_failsafe_callback)
1288
1289 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1290 xen_evtchn_do_upcall)
1291
1292 #endif /* CONFIG_XEN */
1293
1294 #if IS_ENABLED(CONFIG_HYPERV)
1295
1296 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1297 hyperv_vector_handler)
1298
1299 #endif /* CONFIG_HYPERV */
1300
1301 #ifdef CONFIG_FUNCTION_TRACER
1302 #ifdef CONFIG_DYNAMIC_FTRACE
1303
1304 ENTRY(mcount)
1305 ret
1306 ENDPROC(mcount)
1307
1308 ENTRY(ftrace_caller)
1309 cmpl $0, function_trace_stop
1310 jne ftrace_stub
1311
1312 pushl %eax
1313 pushl %ecx
1314 pushl %edx
1315 pushl $0 /* Pass NULL as regs pointer */
1316 movl 4*4(%esp), %eax
1317 movl 0x4(%ebp), %edx
1318 movl function_trace_op, %ecx
1319 subl $MCOUNT_INSN_SIZE, %eax
1320
1321 .globl ftrace_call
1322 ftrace_call:
1323 call ftrace_stub
1324
1325 addl $4,%esp /* skip NULL pointer */
1326 popl %edx
1327 popl %ecx
1328 popl %eax
1329 ftrace_ret:
1330 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1331 .globl ftrace_graph_call
1332 ftrace_graph_call:
1333 jmp ftrace_stub
1334 #endif
1335
1336 .globl ftrace_stub
1337 ftrace_stub:
1338 ret
1339 ENDPROC(ftrace_caller)
1340
1341 ENTRY(ftrace_regs_caller)
1342 pushf /* push flags before compare (in cs location) */
1343 cmpl $0, function_trace_stop
1344 jne ftrace_restore_flags
1345
1346 /*
1347 * i386 does not save SS and ESP when coming from the kernel.
1348 * Instead, to get sp, &regs->sp is used (see ptrace.h).
1349 * Unfortunately, that means eflags must be at the same location
1350 * as the current return ip. We move the return ip into the
1351 * ip location, and move flags into the return ip location.
1352 */
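/*
 * After the pushes below the frame lines up with pt_regs, roughly:
 *
 *	 0*4 .. 10*4	bx cx dx si di bp ax ds es fs gs
 *	11*4		orig_ax (0)
 *	12*4		ip     (copy of the return address)
 *	13*4		cs     (initially the saved flags, fixed up below)
 *	14*4		flags  (initially the return address, fixed up below)
 *
 * which is what the 13*4/14*4 shuffling further down sorts out.
 */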
1353 pushl 4(%esp) /* save return ip into ip slot */
1354
1355 pushl $0 /* Load 0 into orig_ax */
1356 pushl %gs
1357 pushl %fs
1358 pushl %es
1359 pushl %ds
1360 pushl %eax
1361 pushl %ebp
1362 pushl %edi
1363 pushl %esi
1364 pushl %edx
1365 pushl %ecx
1366 pushl %ebx
1367
1368 movl 13*4(%esp), %eax /* Get the saved flags */
1369 movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
1370 /* clobbering return ip */
1371 movl $__KERNEL_CS,13*4(%esp)
1372
1373 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
1374 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
1375 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
1376 movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
1377 pushl %esp /* Save pt_regs as 4th parameter */
1378
1379 GLOBAL(ftrace_regs_call)
1380 call ftrace_stub
1381
1382 addl $4, %esp /* Skip pt_regs */
1383 movl 14*4(%esp), %eax /* Move flags back into cs */
1384 movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
1385 movl 12*4(%esp), %eax /* Get return ip from regs->ip */
1386 movl %eax, 14*4(%esp) /* Put return ip back for ret */
1387
1388 popl %ebx
1389 popl %ecx
1390 popl %edx
1391 popl %esi
1392 popl %edi
1393 popl %ebp
1394 popl %eax
1395 popl %ds
1396 popl %es
1397 popl %fs
1398 popl %gs
1399 addl $8, %esp /* Skip orig_ax and ip */
1400 popf /* Pop flags at end (no addl to corrupt flags) */
1401 jmp ftrace_ret
1402
1403 ftrace_restore_flags:
1404 popf
1405 jmp ftrace_stub
1406 #else /* ! CONFIG_DYNAMIC_FTRACE */
1407
1408 ENTRY(mcount)
1409 cmpl $0, function_trace_stop
1410 jne ftrace_stub
1411
1412 cmpl $ftrace_stub, ftrace_trace_function
1413 jnz trace
1414 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1415 cmpl $ftrace_stub, ftrace_graph_return
1416 jnz ftrace_graph_caller
1417
1418 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1419 jnz ftrace_graph_caller
1420 #endif
1421 .globl ftrace_stub
1422 ftrace_stub:
1423 ret
1424
1425 /* taken from glibc */
1426 trace:
1427 pushl %eax
1428 pushl %ecx
1429 pushl %edx
1430 movl 0xc(%esp), %eax
1431 movl 0x4(%ebp), %edx
1432 subl $MCOUNT_INSN_SIZE, %eax
1433
1434 call *ftrace_trace_function
1435
1436 popl %edx
1437 popl %ecx
1438 popl %eax
1439 jmp ftrace_stub
1440 ENDPROC(mcount)
1441 #endif /* CONFIG_DYNAMIC_FTRACE */
1442 #endif /* CONFIG_FUNCTION_TRACER */
1443
1444 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1445 ENTRY(ftrace_graph_caller)
1446 pushl %eax
1447 pushl %ecx
1448 pushl %edx
1449 movl 0xc(%esp), %edx
1450 lea 0x4(%ebp), %eax
1451 movl (%ebp), %ecx
1452 subl $MCOUNT_INSN_SIZE, %edx
1453 call prepare_ftrace_return
1454 popl %edx
1455 popl %ecx
1456 popl %eax
1457 ret
1458 ENDPROC(ftrace_graph_caller)
1459
1460 .globl return_to_handler
1461 return_to_handler:
1462 pushl %eax
1463 pushl %edx
1464 movl %ebp, %eax
1465 call ftrace_return_to_handler
1466 movl %eax, %ecx
1467 popl %edx
1468 popl %eax
1469 jmp *%ecx
1470 #endif
1471
1472 /*
1473 * Some functions should be protected against kprobes
1474 */
1475 .pushsection .kprobes.text, "ax"
1476
1477 ENTRY(page_fault)
1478 RING0_EC_FRAME
1479 ASM_CLAC
1480 pushl_cfi $do_page_fault
1481 ALIGN
1482 error_code:
1483 /* the function address is in %gs's slot on the stack */
1484 pushl_cfi %fs
1485 /*CFI_REL_OFFSET fs, 0*/
1486 pushl_cfi %es
1487 /*CFI_REL_OFFSET es, 0*/
1488 pushl_cfi %ds
1489 /*CFI_REL_OFFSET ds, 0*/
1490 pushl_cfi %eax
1491 CFI_REL_OFFSET eax, 0
1492 pushl_cfi %ebp
1493 CFI_REL_OFFSET ebp, 0
1494 pushl_cfi %edi
1495 CFI_REL_OFFSET edi, 0
1496 pushl_cfi %esi
1497 CFI_REL_OFFSET esi, 0
1498 pushl_cfi %edx
1499 CFI_REL_OFFSET edx, 0
1500 pushl_cfi %ecx
1501 CFI_REL_OFFSET ecx, 0
1502 pushl_cfi %ebx
1503 CFI_REL_OFFSET ebx, 0
1504 cld
1505 movl $(__KERNEL_PERCPU), %ecx
1506 movl %ecx, %fs
1507 UNWIND_ESPFIX_STACK
1508 GS_TO_REG %ecx
1509 movl PT_GS(%esp), %edi # get the function address
1510 movl PT_ORIG_EAX(%esp), %edx # get the error code
1511 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1512 REG_TO_PTGS %ecx
1513 SET_KERNEL_GS %ecx
1514 movl $(__KERNEL_DS), %ecx
1515 movl %ecx, %ds
1516 movl %ecx, %es
1517
1518 pax_enter_kernel
1519
1520 TRACE_IRQS_OFF
1521 movl %esp,%eax # pt_regs pointer
1522 call *%edi
1523 jmp ret_from_exception
1524 CFI_ENDPROC
1525 ENDPROC(page_fault)
1526
1527 /*
1528 * Debug traps and NMI can happen at the one SYSENTER instruction
1529 * that sets up the real kernel stack. Check here, since we can't
1530 * allow the wrong stack to be used.
1531 *
1532 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1533 * already pushed 3 words if it hits on the sysenter instruction:
1534 * eflags, cs and eip.
1535 *
1536 * We just load the right stack, and push the three (known) values
1537 * by hand onto the new stack - while updating the return eip past
1538 * the instruction that would have done it for sysenter.
1539 */
1540 .macro FIX_STACK offset ok label
1541 cmpw $__KERNEL_CS, 4(%esp)
1542 jne \ok
1543 \label:
1544 movl TSS_sysenter_sp0 + \offset(%esp), %esp
1545 CFI_DEF_CFA esp, 0
1546 CFI_UNDEFINED eip
1547 pushfl_cfi
1548 pushl_cfi $__KERNEL_CS
1549 pushl_cfi $sysenter_past_esp
1550 CFI_REL_OFFSET eip, 0
1551 .endm
1552
1553 ENTRY(debug)
1554 RING0_INT_FRAME
1555 ASM_CLAC
1556 cmpl $ia32_sysenter_target,(%esp)
1557 jne debug_stack_correct
1558 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
1559 debug_stack_correct:
1560 pushl_cfi $-1 # mark this as an int
1561 SAVE_ALL
1562 TRACE_IRQS_OFF
1563 xorl %edx,%edx # error code 0
1564 movl %esp,%eax # pt_regs pointer
1565 call do_debug
1566 jmp ret_from_exception
1567 CFI_ENDPROC
1568 ENDPROC(debug)
1569
1570 /*
1571 * NMI is doubly nasty. It can happen _while_ we're handling
1572 * a debug fault, and the debug fault hasn't yet been able to
1573 * clear up the stack. So we first check whether we got an
1574 * NMI on the sysenter entry path, but after that we need to
1575 * check whether we got an NMI on the debug path where the debug
1576 * fault happened on the sysenter path.
1577 */
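/*
 * In short, the checks below sort out, in order: (1) are we on the
 * 16-bit espfix stack (%ss == __ESPFIX_SS)?  (2) did the NMI hit
 * exactly on the sysenter entry point, before the kernel stack had
 * been switched?  (3) did it hit the debug handler's own stack-fixup
 * window?  Everything else takes nmi_stack_correct, the ordinary path.
 */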
1578 ENTRY(nmi)
1579 RING0_INT_FRAME
1580 ASM_CLAC
1581 pushl_cfi %eax
1582 movl %ss, %eax
1583 cmpw $__ESPFIX_SS, %ax
1584 popl_cfi %eax
1585 je nmi_espfix_stack
1586 cmpl $ia32_sysenter_target,(%esp)
1587 je nmi_stack_fixup
1588 pushl_cfi %eax
1589 movl %esp,%eax
1590 /* Do not access memory above the end of our stack page,
1591 * it might not exist.
1592 */
1593 andl $(THREAD_SIZE-1),%eax
1594 cmpl $(THREAD_SIZE-20),%eax
1595 popl_cfi %eax
1596 jae nmi_stack_correct
1597 cmpl $ia32_sysenter_target,12(%esp)
1598 je nmi_debug_stack_check
1599 nmi_stack_correct:
1600 /* We have a RING0_INT_FRAME here */
1601 pushl_cfi %eax
1602 SAVE_ALL
1603 xorl %edx,%edx # zero error code
1604 movl %esp,%eax # pt_regs pointer
1605 call do_nmi
1606
1607 pax_exit_kernel
1608
1609 jmp restore_all_notrace
1610 CFI_ENDPROC
1611
1612 nmi_stack_fixup:
1613 RING0_INT_FRAME
1614 FIX_STACK 12, nmi_stack_correct, 1
1615 jmp nmi_stack_correct
1616
1617 nmi_debug_stack_check:
1618 /* We have a RING0_INT_FRAME here */
1619 cmpw $__KERNEL_CS,16(%esp)
1620 jne nmi_stack_correct
1621 cmpl $debug,(%esp)
1622 jb nmi_stack_correct
1623 cmpl $debug_esp_fix_insn,(%esp)
1624 ja nmi_stack_correct
1625 FIX_STACK 24, nmi_stack_correct, 1
1626 jmp nmi_stack_correct
1627
1628 nmi_espfix_stack:
1629 /* We have a RING0_INT_FRAME here.
1630 *
1631 * build the far pointer used by the final lss to get back onto the espfix stack
1632 */
1633 pushl_cfi %ss
1634 pushl_cfi %esp
1635 addl $4, (%esp)
1636 /* copy the iret frame of 12 bytes */
1637 .rept 3
1638 pushl_cfi 16(%esp)
1639 .endr
1640 pushl_cfi %eax
1641 SAVE_ALL
1642 FIXUP_ESPFIX_STACK # %eax == %esp
1643 xorl %edx,%edx # zero error code
1644 call do_nmi
1645
1646 pax_exit_kernel
1647
1648 RESTORE_REGS
1649 lss 12+4(%esp), %esp # back to espfix stack
1650 CFI_ADJUST_CFA_OFFSET -24
1651 jmp irq_return
1652 CFI_ENDPROC
1653 ENDPROC(nmi)
1654
1655 ENTRY(int3)
1656 RING0_INT_FRAME
1657 ASM_CLAC
1658 pushl_cfi $-1 # mark this as an int
1659 SAVE_ALL
1660 TRACE_IRQS_OFF
1661 xorl %edx,%edx # zero error code
1662 movl %esp,%eax # pt_regs pointer
1663 call do_int3
1664 jmp ret_from_exception
1665 CFI_ENDPROC
1666 ENDPROC(int3)
1667
1668 ENTRY(general_protection)
1669 RING0_EC_FRAME
1670 pushl_cfi $do_general_protection
1671 jmp error_code
1672 CFI_ENDPROC
1673 ENDPROC(general_protection)
1674
1675 #ifdef CONFIG_KVM_GUEST
1676 ENTRY(async_page_fault)
1677 RING0_EC_FRAME
1678 ASM_CLAC
1679 pushl_cfi $do_async_page_fault
1680 jmp error_code
1681 CFI_ENDPROC
1682 ENDPROC(async_page_fault)
1683 #endif
1684
1685 /*
1686 * End of kprobes section
1687 */
1688 .popsection