/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

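/*
 * Byte offsets of each GPR within the register array passed in via @regs,
 * derived from the __VCPU_REGS_* indices in kvm_vcpu_regs.h.
 */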
#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif

	.text

/**
 * vmx_vmenter - VM-Enter the currently loaded VMCS
 *
 * %RFLAGS.ZF:	!VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *	%RFLAGS.CF is set on VM-Fail Invalid
 *	%RFLAGS.ZF is set on VM-Fail Valid
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if they
 * VM-Fail, whereas a successful VM-Enter + VM-Exit will jump to
 * vmx_vmexit.
 */
ENTRY(vmx_vmenter)
	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
	je 2f

1:	vmresume
	ret

2:	vmlaunch
	ret

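	/*
	 * VMLAUNCH/VMRESUME faulted (routed here via the exception table
	 * entries below).  A fault is expected if the CPU is being torn
	 * down for reboot; anything else is a spurious fault and is fatal.
	 */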
3:	cmpb $0, kvm_rebooting
	jne 4f
	call kvm_spurious_fault
4:	ret

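	/* Out-of-line fixup target for the exception table entries below. */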
	.pushsection .fixup, "ax"
5:	jmp 3b
	.popsection

	_ASM_EXTABLE(1b, 5b)
	_ASM_EXTABLE(2b, 5b)

ENDPROC(vmx_vmenter)

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
ENTRY(vmx_vmexit)
	ret
ENDPROC(vmx_vmexit)

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @launched:	%true if the VMCS has been launched
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
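/*
 * For reference, the C-side declaration is expected to look roughly like
 * the sketch below (the authoritative prototype lives in the VMX C code):
 *
 *	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 *			    bool launched);
 */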
ENTRY(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP

	/*
	 * Save @regs; _ASM_ARG2 may be modified by vmx_update_host_rsp()
	 * and @regs is needed after VM-Exit to save the guest's register
	 * values.
	 */
	push %_ASM_ARG2

	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp
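	/*
	 * Note: vmx_update_host_rsp() stashes the adjusted RSP in
	 * VMCS.HOST_RSP so that hardware restores this exact stack pointer
	 * on VM-Exit; the -WORD_SIZE above accounts for the return address
	 * pushed by the CALL to vmx_vmenter.
	 */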

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/*
	 * Check if vmlaunch or vmresume is needed; the resulting ZF feeds
	 * vmx_vmenter's VMLAUNCH vs. VMRESUME decision.
	 */
	cmpb $0, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	call vmx_vmenter

	/*
	 * Jump on VM-Fail, i.e. if vmx_vmenter returned with CF (VM-Fail
	 * Invalid) or ZF (VM-Fail Valid) set; JBE tests CF|ZF.
	 */
	jbe 2f

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX; it sits one slot down due to the PUSH above. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
	xor %eax, %eax

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to indicate
	 * VM-Exit vs. VM-Fail.
	 */
1:
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
	xor %ebx, %ebx
	xor %ecx, %ecx
	xor %edx, %edx
	xor %esi, %esi
	xor %edi, %edi
	xor %ebp, %ebp

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP
	pop %_ASM_BP
	ret

	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %eax
	jmp 1b
ENDPROC(__vmx_vcpu_run)