/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

/* Size in bytes of one general-purpose register slot. */
#define WORD_SIZE (BITS_PER_LONG / 8)

/*
 * Byte offsets of each GPR within the guest register array, derived
 * from the __VCPU_REGS_* indices.
 */
#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

.text
/**
 * vmx_vmenter - VM-Enter the current loaded VMCS
 *
 * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *	%RFLAGS.CF is set on VM-Fail Invalid
 *	%RFLAGS.ZF is set on VM-Fail Valid
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * VMRESUME/VMLAUNCH fall through and return directly on VM-Fail; on a
 * successful VM-Enter the subsequent VM-Exit transfers control to
 * vmx_vmexit instead (via VMCS.HOST_RIP).
 */
ENTRY(vmx_vmenter)
	/* ZF set by the caller when VMCS.LAUNCHED == 0, i.e. first launch. */
	je 2f

1:	vmresume
	ret

2:	vmlaunch
	ret

	/*
	 * Faulting VM-Enter lands here via the exception table.  A fault is
	 * expected if KVM is shutting down; anything else is a hard error.
	 */
3:	cmpb $0, kvm_rebooting
	jne 4f
	call kvm_spurious_fault
4:	ret

	.pushsection .fixup, "ax"
5:	jmp 3b
	.popsection

	_ASM_EXTABLE(1b, 5b)
	_ASM_EXTABLE(2b, 5b)

ENDPROC(vmx_vmenter)
68 | ||
/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * The counterpart to vmx_vmenter: hardware jumps here after loading host
 * state on VM-Exit, i.e. this label is the target of VMCS.HOST_RIP.
 * Simply return to the caller of vmx_vmenter.
 */
ENTRY(vmx_vmexit)
	ret
ENDPROC(vmx_vmexit)
5e0781df SC |
/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @launched:	%true if the VMCS has been launched
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
ENTRY(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Stash @regs on the stack: _ASM_ARG2 is volatile across the call to
	 * vmx_update_host_rsp() below, and @regs is needed again after
	 * VM-Exit to store the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	/*
	 * Pass the adjusted RSP to vmx_update_host_rsp(), accounting for the
	 * return address pushed by the upcoming CALL to vmx_vmenter().
	 */
	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	/* Reload @regs (saved above) into RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/*
	 * Set ZF from @launched so vmx_vmenter() can pick VMLAUNCH vs.
	 * VMRESUME; the register loads below must not clobber flags.
	 */
	cmpb $0, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX last, this kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	call vmx_vmenter

	/* Jump on VM-Fail (CF or ZF set by vmx_vmenter). */
	jbe 2f

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX (it sits just above the saved guest RAX). */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
	xor %eax, %eax

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
	 */
1:
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
	xor %ebx, %ebx
	xor %ecx, %ecx
	xor %edx, %edx
	xor %esi, %esi
	xor %edi, %edi
	xor %ebp, %ebp

	/* "POP" the stashed @regs pointer, then restore callee-saved regs. */
	add $WORD_SIZE, %_ASM_SP
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret

	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %eax
	jmp 1b
ENDPROC(__vmx_vcpu_run)