/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
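
/*
 * Worked example (illustrative, assuming a 64-bit build where WORD_SIZE
 * is 8 and, per <asm/kvm_vcpu_regs.h>, __VCPU_REGS_RSI is 6): VCPU_RSI
 * expands to 48, the byte offset of the saved guest RSI within the
 * register array that @regs points at.
 */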

	.text

/**
 * vmx_vmenter - VM-Enter the currently loaded VMCS
 *
 * %RFLAGS.ZF:	!VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *	%RFLAGS.CF is set on VM-Fail Invalid
 *	%RFLAGS.ZF is set on VM-Fail Valid
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall through and return directly if they
 * VM-Fail, whereas a successful VM-Enter + VM-Exit will jump to
 * vmx_vmexit.
 */
ENTRY(vmx_vmenter)
	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
	je 2f

1:	vmresume
	ret

2:	vmlaunch
	ret

3:	cmpb $0, kvm_rebooting
	jne 4f
	call kvm_spurious_fault
4:	ret

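	/*
	 * VMLAUNCH/VMRESUME can fault, e.g. with #UD once VMX has been
	 * turned off for a reboot.  The exception table entries below
	 * redirect such a fault to the fixup at 5:, which lands on 3:
	 * above: return quietly if kvm_rebooting is set, otherwise call
	 * kvm_spurious_fault().
	 */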
	.pushsection .fixup, "ax"
5:	jmp 3b
	.popsection

	_ASM_EXTABLE(1b, 5b)
	_ASM_EXTABLE(2b, 5b)

ENDPROC(vmx_vmenter)

/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
ENTRY(vmx_vmexit)
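	/*
	 * Hardware has already restored RSP from VMCS.HOST_RSP, so the
	 * 'ret' simply consumes the return address pushed by the CALL to
	 * vmx_vmenter in ____vmx_vcpu_run, resuming execution at the
	 * instruction after that CALL.
	 */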
	ret
ENDPROC(vmx_vmexit)

/**
 * ____vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * %RBX:	VMCS launched status (non-zero indicates already launched)
 *
 * Returns:
 *	%RBX is 0 on VM-Exit, 1 on VM-Fail
 */
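/*
 * Note on the calling convention: @vmx and @regs arrive in the standard
 * C argument registers (_ASM_ARG1/_ASM_ARG2), but the launched status is
 * passed in %RBX, which is not a C argument register, so the caller is
 * expected to load it explicitly (e.g. from an asm() wrapper) before
 * calling this routine.
 */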
ENTRY(____vmx_vcpu_run)
	push %_ASM_BP
	mov %_ASM_SP, %_ASM_BP

	/*
	 * Save @regs, as _ASM_ARG2 may be modified by vmx_update_host_rsp()
	 * and @regs is needed after VM-Exit to save the guest's register
	 * values.
	 */
	push %_ASM_ARG2

	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp
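	/*
	 * The word subtracted above accounts for the return address that
	 * the CALL to vmx_vmenter() pushes: VMCS.HOST_RSP, refreshed by
	 * vmx_update_host_rsp() when it changes, then matches the RSP
	 * value hardware restores at VM-Exit, leaving that return address
	 * on top of the stack for vmx_vmexit's 'ret' to consume.
	 */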

	/* Load @regs to RCX. */
	mov (%_ASM_SP), %_ASM_CX

	/* Check if vmlaunch or vmresume is needed */
	cmpb $0, %bl
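	/*
	 * The ZF produced here is consumed by vmx_vmenter() to pick
	 * VMLAUNCH vs. VMRESUME, hence the flag-preserving MOVs below.
	 */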

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RAX(%_ASM_CX), %_ASM_AX
	mov VCPU_RBX(%_ASM_CX), %_ASM_BX
	mov VCPU_RDX(%_ASM_CX), %_ASM_DX
	mov VCPU_RSI(%_ASM_CX), %_ASM_SI
	mov VCPU_RDI(%_ASM_CX), %_ASM_DI
	mov VCPU_RBP(%_ASM_CX), %_ASM_BP
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_CX), %r8
	mov VCPU_R9 (%_ASM_CX), %r9
	mov VCPU_R10(%_ASM_CX), %r10
	mov VCPU_R11(%_ASM_CX), %r11
	mov VCPU_R12(%_ASM_CX), %r12
	mov VCPU_R13(%_ASM_CX), %r13
	mov VCPU_R14(%_ASM_CX), %r14
	mov VCPU_R15(%_ASM_CX), %r15
#endif
	/* Load guest RCX last.  This kills the @regs pointer! */
	mov VCPU_RCX(%_ASM_CX), %_ASM_CX
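	/*
	 * All GPRs now hold guest values; hardware switches RSP to the
	 * guest's value as part of VM-Enter.
	 */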

	/* Enter guest mode */
	call vmx_vmenter

	/* Jump on VM-Fail. */
	jbe 2f
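	/*
	 * JBE is taken if CF or ZF is set, i.e. on either flavor of
	 * VM-Fail per vmx_vmenter's flag convention above.
	 */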

	/* Temporarily save guest's RCX. */
	push %_ASM_CX

	/* Reload @regs to RCX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_CX
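	/*
	 * The WORD_SIZE displacement skips over the guest RCX that was
	 * just pushed; the slot above it still holds the @regs pointer
	 * saved at function entry.
	 */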

	/* Save all guest registers, including RCX from the stack */
	mov %_ASM_AX, VCPU_RAX(%_ASM_CX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_CX)
	__ASM_SIZE(pop) VCPU_RCX(%_ASM_CX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_CX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_CX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_CX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_CX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_CX)
	mov %r9,  VCPU_R9 (%_ASM_CX)
	mov %r10, VCPU_R10(%_ASM_CX)
	mov %r11, VCPU_R11(%_ASM_CX)
	mov %r12, VCPU_R12(%_ASM_CX)
	mov %r13, VCPU_R13(%_ASM_CX)
	mov %r14, VCPU_R14(%_ASM_CX)
	mov %r15, VCPU_R15(%_ASM_CX)
#endif

	/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
	 */
1:
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %esi, %esi
	xor %edi, %edi
	xor %ebp, %ebp

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP
	pop %_ASM_BP
	ret

	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %ebx
	jmp 1b
ENDPROC(____vmx_vcpu_run)