/*
 * x86 context-switch helpers (<asm/switch_to.h>).
 * NOTE(review): the original top-of-file content was git-blame table markup
 * left over from extraction; replaced with this header comment.
 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

struct task_struct;	/* one of the stranger aspects of C forward declarations */

/*
 * Low-level asm part of the context switch; returns a task_struct pointer
 * which switch_to() assigns to @last.
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

/* C-level part of the context switch (called from asm, hence __visible). */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);
struct tss_struct;
/* Handle the less common extra per-task state on a context switch. */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic.  Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed.  This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
36 | ||
/* Entry point newly forked tasks start at (see struct fork_frame below). */
asmlinkage void ret_from_fork(void);
/*
 * This is the structure pointed to by thread.sp for an inactive task.  The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
	/* Callee-saved GPRs pushed/popped by __switch_to_asm(): */
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together.  They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
62 | ||
/*
 * Initial stack contents of a forked task: the switch frame consumed by
 * __switch_to_asm() followed by the task's register state.
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
/*
 * Switch from task @prev to task @next.  @last is assigned the return
 * value of __switch_to_asm().  prepare_switch_to() probes the new stack
 * first (see above) while still on the old one.
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
#ifdef CONFIG_X86_32
/*
 * Keep the per-CPU SYSENTER CS (TSS ss1 and MSR_IA32_SYSENTER_CS) in sync
 * with the given thread, skipping the MSR write when already up to date.
 */
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
		return;

	/* Update the cached copy first, then the actual MSR. */
	this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
86 | ||
/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/* Propagate the task's sp0 into the hardware entry state. */
	load_sp0(task->thread.sp0);
}

#endif /* _ASM_X86_SWITCH_TO_H */