// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

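/*
 * CC() wraps a single VM-Enter consistency check: it evaluates the check,
 * emits a tracepoint naming the failed expression when the check fails, and
 * hands the result back so callers can chain checks with || without losing
 * track of which one tripped.  Typical usage, as in the control checks
 * further down in this file:
 *
 *	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
 *		return -EINVAL;
 */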
#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed.
	 */
	return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: don't simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

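/*
 * Stop using the shadow VMCS: clear the SHADOW_VMCS secondary execution
 * control, point the VMCS link pointer back at -1 and drop any pending
 * vmcs12-to-shadow sync.
 */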
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = 0;
	vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

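/*
 * Host state is tracked per loaded VMCS.  When switching between vmcs01 and
 * vmcs02 while guest state is loaded, carry the host segment state that was
 * established for the outgoing VMCS over to the incoming one so later host
 * state restoration stays consistent.
 */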
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

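/*
 * Make @vmcs the current loaded VMCS for @vcpu: load it on this CPU, carry
 * over the cached host state and invalidate the segment cache.
 */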
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_segment_cache_clear(vmx);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}

static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether, in a nested guest, they need to be injected into L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}

static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

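/*
 * A guest physical address used for a VMCS control structure (bitmaps, the
 * virtual-APIC page, etc.) must be page aligned and must not exceed the
 * vCPU's physical address width.
 */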
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * Check if the MSR is intercepted in the L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If an MSR is allowed by L0, check whether it is also allowed by L1.
 * The corresponding bit is cleared only if both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}

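/*
 * Set every read and write intercept bit for the x2APIC MSR range
 * (0x800 - 0x8ff) in the given bitmap; callers selectively clear bits
 * again afterwards.
 */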
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}

static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT.
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (CC(!nested_cpu_has_vid(vmcs12)) ||
	     CC(!nested_exit_intr_ack_set(vcpu)) ||
	     CC((vmcs12->posted_intr_nv & 0xff00)) ||
	     CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
	     CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

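/*
 * An MSR load/store area must be 16-byte aligned and the entire list,
 * count * 16 bytes, must fit below the vCPU's physical address width.
 */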
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						       struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

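/*
 * The recommended maximum number of MSRs in an atomic switch list comes
 * from IA32_VMX_MISC: (bits 27:25 + 1) * 512 entries.
 */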
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load the guest's/host's MSRs at nested entry/exit.
 * Return 0 on success, or the (1-based) index of the failing entry.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch
 * as possible, process all valid entries before failing rather than precheck
 * for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
	 */
	if (msr_index == MSR_IA32_TSC) {
		int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
					       MSR_IA32_TSC);

		if (index >= 0) {
			u64 val = vmx->msr_autostore.guest.val[index].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
				     msr_index);
		return false;
	}
	return true;
}

static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;

		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					    offsetof(struct vmx_msr_entry, value),
					 &data, sizeof(data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, data);
			return -EINVAL;
		}
	}
	return 0;
}

static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_index;
	bool in_autostore_list;
	int last;

	msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
	in_autostore_list = msr_autostore_index >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_index] = autostore->val[last];
	}
}

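/*
 * A CR3 value is legal only if it does not set any bits above the vCPU's
 * physical address width.
 */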
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}

/*
 * Load guest's/host's cr3 at nested entry/exit.  @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled.  On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (CC(!nested_cr3_valid(vcpu, cr3))) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return -EINVAL;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (is_pae_paging(vcpu) && !nested_ept) {
			if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return -EINVAL;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}

/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

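/*
 * Returns true if, within @mask, every bit set in @subset is also set in
 * @superset.  Used below when userspace restores the VMX capability MSRs:
 * the restored value may not advertise capabilities that KVM itself does
 * not support.
 */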
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must be 1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	case MSR_IA32_VMX_VMFUNC:
		if (data & ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;
		vmx->nested.msrs.vmfunc_controls = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
 * been modified by the L1 guest.  Note, "writable" in this context means
 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
 * VM-exit information fields (which are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS").
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i;

	if (WARN_ON(!shadow_vmcs))
		return;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (i = 0; i < max_shadow_read_write_fields; i++) {
		field = shadow_read_write_fields[i];
		val = __vmcs_readl(field.encoding);
		vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

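/*
 * Mirror the cached vmcs12 into the shadow VMCS so that L1's VMREAD (and
 * VMWRITE of read/write fields) of shadowed fields can be handled by the
 * CPU without a VM-exit.
 */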
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const struct shadow_vmcs_field *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
	struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
	struct shadow_vmcs_field field;
	unsigned long val;
	int i, q;

	if (WARN_ON(!shadow_vmcs))
		return;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			val = vmcs12_read_any(vmcs12, field.encoding,
					      field.offset);
			__vmcs_writel(field.encoding, val);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

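/*
 * Pull fields from the Hyper-V enlightened VMCS into the cached vmcs12.
 * The hv_clean_fields mask indicates which field groups the L1 hypervisor
 * has not touched since the last sync and which can therefore be skipped.
 */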
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
1609 | vmcs12->guest_ds_base = evmcs->guest_ds_base; | |
1610 | vmcs12->guest_fs_base = evmcs->guest_fs_base; | |
1611 | vmcs12->guest_gs_base = evmcs->guest_gs_base; | |
1612 | vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base; | |
1613 | vmcs12->guest_tr_base = evmcs->guest_tr_base; | |
1614 | vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base; | |
1615 | vmcs12->guest_idtr_base = evmcs->guest_idtr_base; | |
1616 | vmcs12->guest_es_limit = evmcs->guest_es_limit; | |
1617 | vmcs12->guest_cs_limit = evmcs->guest_cs_limit; | |
1618 | vmcs12->guest_ss_limit = evmcs->guest_ss_limit; | |
1619 | vmcs12->guest_ds_limit = evmcs->guest_ds_limit; | |
1620 | vmcs12->guest_fs_limit = evmcs->guest_fs_limit; | |
1621 | vmcs12->guest_gs_limit = evmcs->guest_gs_limit; | |
1622 | vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit; | |
1623 | vmcs12->guest_tr_limit = evmcs->guest_tr_limit; | |
1624 | vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit; | |
1625 | vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit; | |
1626 | vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes; | |
1627 | vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes; | |
1628 | vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes; | |
1629 | vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes; | |
1630 | vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes; | |
1631 | vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes; | |
1632 | vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes; | |
1633 | vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes; | |
1634 | vmcs12->guest_es_selector = evmcs->guest_es_selector; | |
1635 | vmcs12->guest_cs_selector = evmcs->guest_cs_selector; | |
1636 | vmcs12->guest_ss_selector = evmcs->guest_ss_selector; | |
1637 | vmcs12->guest_ds_selector = evmcs->guest_ds_selector; | |
1638 | vmcs12->guest_fs_selector = evmcs->guest_fs_selector; | |
1639 | vmcs12->guest_gs_selector = evmcs->guest_gs_selector; | |
1640 | vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector; | |
1641 | vmcs12->guest_tr_selector = evmcs->guest_tr_selector; | |
1642 | } | |
1643 | ||
1644 | if (unlikely(!(evmcs->hv_clean_fields & | |
1645 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) { | |
1646 | vmcs12->tsc_offset = evmcs->tsc_offset; | |
1647 | vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr; | |
1648 | vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap; | |
1649 | } | |
1650 | ||
1651 | if (unlikely(!(evmcs->hv_clean_fields & | |
1652 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) { | |
1653 | vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask; | |
1654 | vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask; | |
1655 | vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow; | |
1656 | vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow; | |
1657 | vmcs12->guest_cr0 = evmcs->guest_cr0; | |
1658 | vmcs12->guest_cr3 = evmcs->guest_cr3; | |
1659 | vmcs12->guest_cr4 = evmcs->guest_cr4; | |
1660 | vmcs12->guest_dr7 = evmcs->guest_dr7; | |
1661 | } | |
1662 | ||
1663 | if (unlikely(!(evmcs->hv_clean_fields & | |
1664 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) { | |
1665 | vmcs12->host_fs_base = evmcs->host_fs_base; | |
1666 | vmcs12->host_gs_base = evmcs->host_gs_base; | |
1667 | vmcs12->host_tr_base = evmcs->host_tr_base; | |
1668 | vmcs12->host_gdtr_base = evmcs->host_gdtr_base; | |
1669 | vmcs12->host_idtr_base = evmcs->host_idtr_base; | |
1670 | vmcs12->host_rsp = evmcs->host_rsp; | |
1671 | } | |
1672 | ||
1673 | if (unlikely(!(evmcs->hv_clean_fields & | |
1674 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) { | |
1675 | vmcs12->ept_pointer = evmcs->ept_pointer; | |
1676 | vmcs12->virtual_processor_id = evmcs->virtual_processor_id; | |
1677 | } | |
1678 | ||
1679 | if (unlikely(!(evmcs->hv_clean_fields & | |
1680 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) { | |
1681 | vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer; | |
1682 | vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl; | |
1683 | vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat; | |
1684 | vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer; | |
1685 | vmcs12->guest_pdptr0 = evmcs->guest_pdptr0; | |
1686 | vmcs12->guest_pdptr1 = evmcs->guest_pdptr1; | |
1687 | vmcs12->guest_pdptr2 = evmcs->guest_pdptr2; | |
1688 | vmcs12->guest_pdptr3 = evmcs->guest_pdptr3; | |
1689 | vmcs12->guest_pending_dbg_exceptions = | |
1690 | evmcs->guest_pending_dbg_exceptions; | |
1691 | vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp; | |
1692 | vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip; | |
1693 | vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs; | |
1694 | vmcs12->guest_activity_state = evmcs->guest_activity_state; | |
1695 | vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs; | |
1696 | } | |
1697 | ||
1698 | /* | |
1699 | * Not used? | |
1700 | * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr; | |
1701 | * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr; | |
1702 | * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr; | |
1703 | * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0; | |
1704 | * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1; | |
1705 | * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2; | |
1706 | * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3; | |
1707 | * vmcs12->page_fault_error_code_mask = | |
1708 | * evmcs->page_fault_error_code_mask; | |
1709 | * vmcs12->page_fault_error_code_match = | |
1710 | * evmcs->page_fault_error_code_match; | |
1711 | * vmcs12->cr3_target_count = evmcs->cr3_target_count; | |
1712 | * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count; | |
1713 | * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count; | |
1714 | * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count; | |
1715 | */ | |
1716 | ||
1717 | /* | |
1718 | * Read only fields: | |
1719 | * vmcs12->guest_physical_address = evmcs->guest_physical_address; | |
1720 | * vmcs12->vm_instruction_error = evmcs->vm_instruction_error; | |
1721 | * vmcs12->vm_exit_reason = evmcs->vm_exit_reason; | |
1722 | * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info; | |
1723 | * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code; | |
1724 | * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field; | |
1725 | * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code; | |
1726 | * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len; | |
1727 | * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info; | |
1728 | * vmcs12->exit_qualification = evmcs->exit_qualification; | |
1729 | * vmcs12->guest_linear_address = evmcs->guest_linear_address; | |
1730 | * | |
1731 | * Not present in struct vmcs12: | |
1732 | * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx; | |
1733 | * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi; | |
1734 | * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi; | |
1735 | * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip; | |
1736 | */ | |
1737 | ||
1738 | return 0; | |
1739 | } | |
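Each `unlikely(!(evmcs->hv_clean_fields & ...))` test above follows the same pattern: a set bit means Hyper-V has not touched that group of fields since the last sync, so the copy can be skipped. A minimal sketch of that predicate, with a hypothetical helper name (illustrative only, not part of this file):

static inline bool evmcs_group_dirty(const struct hv_enlightened_vmcs *evmcs,
				     u32 clean_field_bit)
{
	/* A clear "clean" bit means the group must be re-read from the eVMCS. */
	return !(evmcs->hv_clean_fields & clean_field_bit);
}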
1740 | ||
1741 | static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) | |
1742 | { | |
1743 | struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; | |
1744 | struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; | |
1745 | ||
1746 | /* | |
1747 | * Should not be changed by KVM: | |
1748 | * | |
1749 | * evmcs->host_es_selector = vmcs12->host_es_selector; | |
1750 | * evmcs->host_cs_selector = vmcs12->host_cs_selector; | |
1751 | * evmcs->host_ss_selector = vmcs12->host_ss_selector; | |
1752 | * evmcs->host_ds_selector = vmcs12->host_ds_selector; | |
1753 | * evmcs->host_fs_selector = vmcs12->host_fs_selector; | |
1754 | * evmcs->host_gs_selector = vmcs12->host_gs_selector; | |
1755 | * evmcs->host_tr_selector = vmcs12->host_tr_selector; | |
1756 | * evmcs->host_ia32_pat = vmcs12->host_ia32_pat; | |
1757 | * evmcs->host_ia32_efer = vmcs12->host_ia32_efer; | |
1758 | * evmcs->host_cr0 = vmcs12->host_cr0; | |
1759 | * evmcs->host_cr3 = vmcs12->host_cr3; | |
1760 | * evmcs->host_cr4 = vmcs12->host_cr4; | |
1761 | * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp; | |
1762 | * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip; | |
1763 | * evmcs->host_rip = vmcs12->host_rip; | |
1764 | * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs; | |
1765 | * evmcs->host_fs_base = vmcs12->host_fs_base; | |
1766 | * evmcs->host_gs_base = vmcs12->host_gs_base; | |
1767 | * evmcs->host_tr_base = vmcs12->host_tr_base; | |
1768 | * evmcs->host_gdtr_base = vmcs12->host_gdtr_base; | |
1769 | * evmcs->host_idtr_base = vmcs12->host_idtr_base; | |
1770 | * evmcs->host_rsp = vmcs12->host_rsp; | |
3731905e | 1771 | * sync_vmcs02_to_vmcs12() doesn't read these: |
55d2375e SC |
1772 | * evmcs->io_bitmap_a = vmcs12->io_bitmap_a; |
1773 | * evmcs->io_bitmap_b = vmcs12->io_bitmap_b; | |
1774 | * evmcs->msr_bitmap = vmcs12->msr_bitmap; | |
1775 | * evmcs->ept_pointer = vmcs12->ept_pointer; | |
1776 | * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap; | |
1777 | * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr; | |
1778 | * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr; | |
1779 | * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr; | |
1780 | * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0; | |
1781 | * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1; | |
1782 | * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2; | |
1783 | * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3; | |
1784 | * evmcs->tpr_threshold = vmcs12->tpr_threshold; | |
1785 | * evmcs->virtual_processor_id = vmcs12->virtual_processor_id; | |
1786 | * evmcs->exception_bitmap = vmcs12->exception_bitmap; | |
1787 | * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer; | |
1788 | * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control; | |
1789 | * evmcs->vm_exit_controls = vmcs12->vm_exit_controls; | |
1790 | * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control; | |
1791 | * evmcs->page_fault_error_code_mask = | |
1792 | * vmcs12->page_fault_error_code_mask; | |
1793 | * evmcs->page_fault_error_code_match = | |
1794 | * vmcs12->page_fault_error_code_match; | |
1795 | * evmcs->cr3_target_count = vmcs12->cr3_target_count; | |
1796 | * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr; | |
1797 | * evmcs->tsc_offset = vmcs12->tsc_offset; | |
1798 | * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl; | |
1799 | * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask; | |
1800 | * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask; | |
1801 | * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow; | |
1802 | * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow; | |
1803 | * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count; | |
1804 | * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count; | |
1805 | * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count; | |
1806 | * | |
1807 | * Not present in struct vmcs12: | |
1808 | * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx; | |
1809 | * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi; | |
1810 | * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi; | |
1811 | * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip; | |
1812 | */ | |
1813 | ||
1814 | evmcs->guest_es_selector = vmcs12->guest_es_selector; | |
1815 | evmcs->guest_cs_selector = vmcs12->guest_cs_selector; | |
1816 | evmcs->guest_ss_selector = vmcs12->guest_ss_selector; | |
1817 | evmcs->guest_ds_selector = vmcs12->guest_ds_selector; | |
1818 | evmcs->guest_fs_selector = vmcs12->guest_fs_selector; | |
1819 | evmcs->guest_gs_selector = vmcs12->guest_gs_selector; | |
1820 | evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector; | |
1821 | evmcs->guest_tr_selector = vmcs12->guest_tr_selector; | |
1822 | ||
1823 | evmcs->guest_es_limit = vmcs12->guest_es_limit; | |
1824 | evmcs->guest_cs_limit = vmcs12->guest_cs_limit; | |
1825 | evmcs->guest_ss_limit = vmcs12->guest_ss_limit; | |
1826 | evmcs->guest_ds_limit = vmcs12->guest_ds_limit; | |
1827 | evmcs->guest_fs_limit = vmcs12->guest_fs_limit; | |
1828 | evmcs->guest_gs_limit = vmcs12->guest_gs_limit; | |
1829 | evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit; | |
1830 | evmcs->guest_tr_limit = vmcs12->guest_tr_limit; | |
1831 | evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit; | |
1832 | evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit; | |
1833 | ||
1834 | evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes; | |
1835 | evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes; | |
1836 | evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes; | |
1837 | evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes; | |
1838 | evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes; | |
1839 | evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes; | |
1840 | evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes; | |
1841 | evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes; | |
1842 | ||
1843 | evmcs->guest_es_base = vmcs12->guest_es_base; | |
1844 | evmcs->guest_cs_base = vmcs12->guest_cs_base; | |
1845 | evmcs->guest_ss_base = vmcs12->guest_ss_base; | |
1846 | evmcs->guest_ds_base = vmcs12->guest_ds_base; | |
1847 | evmcs->guest_fs_base = vmcs12->guest_fs_base; | |
1848 | evmcs->guest_gs_base = vmcs12->guest_gs_base; | |
1849 | evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base; | |
1850 | evmcs->guest_tr_base = vmcs12->guest_tr_base; | |
1851 | evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base; | |
1852 | evmcs->guest_idtr_base = vmcs12->guest_idtr_base; | |
1853 | ||
1854 | evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat; | |
1855 | evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer; | |
1856 | ||
1857 | evmcs->guest_pdptr0 = vmcs12->guest_pdptr0; | |
1858 | evmcs->guest_pdptr1 = vmcs12->guest_pdptr1; | |
1859 | evmcs->guest_pdptr2 = vmcs12->guest_pdptr2; | |
1860 | evmcs->guest_pdptr3 = vmcs12->guest_pdptr3; | |
1861 | ||
1862 | evmcs->guest_pending_dbg_exceptions = | |
1863 | vmcs12->guest_pending_dbg_exceptions; | |
1864 | evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp; | |
1865 | evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip; | |
1866 | ||
1867 | evmcs->guest_activity_state = vmcs12->guest_activity_state; | |
1868 | evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs; | |
1869 | ||
1870 | evmcs->guest_cr0 = vmcs12->guest_cr0; | |
1871 | evmcs->guest_cr3 = vmcs12->guest_cr3; | |
1872 | evmcs->guest_cr4 = vmcs12->guest_cr4; | |
1873 | evmcs->guest_dr7 = vmcs12->guest_dr7; | |
1874 | ||
1875 | evmcs->guest_physical_address = vmcs12->guest_physical_address; | |
1876 | ||
1877 | evmcs->vm_instruction_error = vmcs12->vm_instruction_error; | |
1878 | evmcs->vm_exit_reason = vmcs12->vm_exit_reason; | |
1879 | evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info; | |
1880 | evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code; | |
1881 | evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field; | |
1882 | evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code; | |
1883 | evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len; | |
1884 | evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info; | |
1885 | ||
1886 | evmcs->exit_qualification = vmcs12->exit_qualification; | |
1887 | ||
1888 | evmcs->guest_linear_address = vmcs12->guest_linear_address; | |
1889 | evmcs->guest_rsp = vmcs12->guest_rsp; | |
1890 | evmcs->guest_rflags = vmcs12->guest_rflags; | |
1891 | ||
1892 | evmcs->guest_interruptibility_info = | |
1893 | vmcs12->guest_interruptibility_info; | |
1894 | evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control; | |
1895 | evmcs->vm_entry_controls = vmcs12->vm_entry_controls; | |
1896 | evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field; | |
1897 | evmcs->vm_entry_exception_error_code = | |
1898 | vmcs12->vm_entry_exception_error_code; | |
1899 | evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len; | |
1900 | ||
1901 | evmcs->guest_rip = vmcs12->guest_rip; | |
1902 | ||
1903 | evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs; | |
1904 | ||
1905 | return 0; | |
1906 | } | |
1907 | ||
1908 | /* | |
1909 | * This is an equivalent of the nested hypervisor executing the vmptrld | |
1910 | * instruction. | |
1911 | */ | |
b6a0653a VK |
1912 | static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld( |
1913 | struct kvm_vcpu *vcpu, bool from_launch) | |
55d2375e SC |
1914 | { |
1915 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
a21a39c2 | 1916 | bool evmcs_gpa_changed = false; |
11e34914 | 1917 | u64 evmcs_gpa; |
55d2375e SC |
1918 | |
1919 | if (likely(!vmx->nested.enlightened_vmcs_enabled)) | |
b6a0653a | 1920 | return EVMPTRLD_DISABLED; |
55d2375e | 1921 | |
11e34914 | 1922 | if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) |
b6a0653a | 1923 | return EVMPTRLD_DISABLED; |
55d2375e | 1924 | |
95fa1010 VK |
1925 | if (unlikely(!vmx->nested.hv_evmcs || |
1926 | evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { | |
55d2375e SC |
1927 | if (!vmx->nested.hv_evmcs) |
1928 | vmx->nested.current_vmptr = -1ull; | |
1929 | ||
1930 | nested_release_evmcs(vcpu); | |
1931 | ||
11e34914 | 1932 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa), |
dee9c049 | 1933 | &vmx->nested.hv_evmcs_map)) |
b6a0653a | 1934 | return EVMPTRLD_ERROR; |
55d2375e | 1935 | |
dee9c049 | 1936 | vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; |
55d2375e SC |
1937 | |
1938 | /* | |
1939 | * Currently, KVM only supports eVMCS version 1 | |
1940 | * (== KVM_EVMCS_VERSION), so the guest is expected to set the | |
1941 | * first u32 field of the eVMCS, which specifies the eVMCS | |
1942 | * VersionNumber, to that value. | |
1943 | * | |
1944 | * The guest learns which eVMCS versions the host supports by | |
1945 | * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is | |
1946 | * expected to set this CPUID leaf according to the value | |
1947 | * returned in vmcs_version from nested_enable_evmcs(). | |
1948 | * | |
1949 | * However, it turns out that Microsoft Hyper-V fails to comply | |
1950 | * with its own invented interface: when Hyper-V uses eVMCS, it | |
1951 | * just sets the first u32 field of the eVMCS to the revision_id | |
1952 | * specified in MSR_IA32_VMX_BASIC, instead of an eVMCS version | |
1953 | * number, which should be one of the supported versions from | |
1954 | * CPUID.0x4000000A.EAX[0:15]. | |
1955 | * | |
1956 | * To work around this Hyper-V bug, accept either a supported | |
1957 | * eVMCS version or the VMCS12 revision_id as a valid value for | |
1958 | * the first u32 field of the eVMCS. | |
1959 | */ | |
1960 | if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && | |
1961 | (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { | |
1962 | nested_release_evmcs(vcpu); | |
b6a0653a | 1963 | return EVMPTRLD_VMFAIL; |
55d2375e SC |
1964 | } |
1965 | ||
1966 | vmx->nested.dirty_vmcs12 = true; | |
11e34914 | 1967 | vmx->nested.hv_evmcs_vmptr = evmcs_gpa; |
55d2375e | 1968 | |
a21a39c2 | 1969 | evmcs_gpa_changed = true; |
55d2375e SC |
1970 | /* |
1971 | * Unlike a normal vmcs12, an enlightened vmcs12 is not fully | |
1972 | * reloaded from the guest's memory (read-only fields, fields not | |
1973 | * present in struct hv_enlightened_vmcs, ...). Make sure there | |
1974 | * are no leftovers. | |
1975 | */ | |
1976 | if (from_launch) { | |
1977 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
1978 | memset(vmcs12, 0, sizeof(*vmcs12)); | |
1979 | vmcs12->hdr.revision_id = VMCS12_REVISION; | |
1980 | } | |
1981 | ||
1982 | } | |
a21a39c2 VK |
1983 | |
1984 | /* | |
ffdbd50d | 1985 | * Clean-fields data can't be used on VMLAUNCH, or when we switch | |
a21a39c2 VK |
1986 | * between different L2 guests as KVM keeps a single VMCS12 per L1. |
1987 | */ | |
1988 | if (from_launch || evmcs_gpa_changed) | |
1989 | vmx->nested.hv_evmcs->hv_clean_fields &= | |
1990 | ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; | |
1991 | ||
b6a0653a | 1992 | return EVMPTRLD_SUCCEEDED; |
55d2375e SC |
1993 | } |
1994 | ||
3731905e | 1995 | void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu) |
55d2375e SC |
1996 | { |
1997 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
1998 | ||
55d2375e SC |
1999 | if (vmx->nested.hv_evmcs) { |
2000 | copy_vmcs12_to_enlightened(vmx); | |
2001 | /* All fields are clean */ | |
2002 | vmx->nested.hv_evmcs->hv_clean_fields |= | |
2003 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL; | |
2004 | } else { | |
2005 | copy_vmcs12_to_shadow(vmx); | |
2006 | } | |
2007 | ||
3731905e | 2008 | vmx->nested.need_vmcs12_to_shadow_sync = false; |
55d2375e SC |
2009 | } |
2010 | ||
2011 | static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer) | |
2012 | { | |
2013 | struct vcpu_vmx *vmx = | |
2014 | container_of(timer, struct vcpu_vmx, nested.preemption_timer); | |
2015 | ||
2016 | vmx->nested.preemption_timer_expired = true; | |
2017 | kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); | |
2018 | kvm_vcpu_kick(&vmx->vcpu); | |
2019 | ||
2020 | return HRTIMER_NORESTART; | |
2021 | } | |
2022 | ||
2023 | static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu) | |
2024 | { | |
2025 | u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value; | |
2026 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2027 | ||
2028 | /* | |
2029 | * A timer value of zero is architecturally guaranteed to cause | |
2030 | * a VMExit prior to executing any instructions in the guest. | |
2031 | */ | |
2032 | if (preemption_timeout == 0) { | |
2033 | vmx_preemption_timer_fn(&vmx->nested.preemption_timer); | |
2034 | return; | |
2035 | } | |
2036 | ||
2037 | if (vcpu->arch.virtual_tsc_khz == 0) | |
2038 | return; | |
2039 | ||
2040 | preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; | |
2041 | preemption_timeout *= 1000000; | |
2042 | do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz); | |
2043 | hrtimer_start(&vmx->nested.preemption_timer, | |
2044 | ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); | |
2045 | } | |
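For concreteness, the shift/multiply/divide above converts the vmcs12 timer value from units of 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (i.e. 2^5) TSC cycles into nanoseconds. A standalone sketch of the same arithmetic, with a purely illustrative 2 GHz (2,000,000 kHz) guest TSC:

/*
 * Illustrative only: with value = 1000 and tsc_khz = 2000000,
 *   ticks = 1000 << 5 = 32000 TSC cycles
 *   ns    = 32000 * 1000000 / 2000000 = 16000 ns (16 us)
 */
static inline u64 emulated_preemption_timeout_ns(u64 value, u32 tsc_khz)
{
	return (value << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE) * 1000000 / tsc_khz;
}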
2046 | ||
2047 | static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) | |
2048 | { | |
2049 | if (vmx->nested.nested_run_pending && | |
2050 | (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) | |
2051 | return vmcs12->guest_ia32_efer; | |
2052 | else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) | |
2053 | return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); | |
2054 | else | |
2055 | return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); | |
2056 | } | |
2057 | ||
2058 | static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) | |
2059 | { | |
2060 | /* | |
2061 | * If vmcs02 hasn't been initialized, set the constant vmcs02 state | |
2062 | * according to L0's settings (vmcs12 is irrelevant here). Host | |
2063 | * fields that come from L0 and are not constant, e.g. HOST_CR3, | |
2064 | * will be set as needed prior to VMLAUNCH/VMRESUME. | |
2065 | */ | |
2066 | if (vmx->nested.vmcs02_initialized) | |
2067 | return; | |
2068 | vmx->nested.vmcs02_initialized = true; | |
2069 | ||
2070 | /* | |
2071 | * We don't care what the EPTP value is; we just need to guarantee | |
2072 | * it's valid so that we don't get a false positive when doing the | |
2073 | * early consistency checks. | |
2074 | */ | |
2075 | if (enable_ept && nested_early_check) | |
2076 | vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0)); | |
2077 | ||
2078 | /* All VMFUNCs are currently emulated through L0 vmexits. */ | |
2079 | if (cpu_has_vmx_vmfunc()) | |
2080 | vmcs_write64(VM_FUNCTION_CONTROL, 0); | |
2081 | ||
2082 | if (cpu_has_vmx_posted_intr()) | |
2083 | vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR); | |
2084 | ||
2085 | if (cpu_has_vmx_msr_bitmap()) | |
2086 | vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); | |
2087 | ||
4d6c9892 SC |
2088 | /* |
2089 | * The PML address never changes, so it is constant in vmcs02. | |
2090 | * Conceptually we want to copy the PML index from vmcs01 here, | |
2091 | * and then back to vmcs01 on nested vmexit. But since we flush | |
2092 | * the log and reset GUEST_PML_INDEX on each vmexit, the PML | |
2093 | * index is also effectively constant in vmcs02. | |
2094 | */ | |
2095 | if (enable_pml) { | |
55d2375e | 2096 | vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); |
4d6c9892 SC |
2097 | vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); |
2098 | } | |
55d2375e | 2099 | |
c538d57f SC |
2100 | if (cpu_has_vmx_encls_vmexit()) |
2101 | vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); | |
55d2375e SC |
2102 | |
2103 | /* | |
2104 | * Set the MSR load/store lists to match L0's settings. Only the | |
2105 | * addresses are constant (for vmcs02), the counts can change based | |
2106 | * on L2's behavior, e.g. switching to/from long mode. | |
2107 | */ | |
662f1d1d | 2108 | vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); |
55d2375e SC |
2109 | vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); |
2110 | vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); | |
2111 | ||
2112 | vmx_set_constant_host_state(vmx); | |
2113 | } | |
2114 | ||
b1346ab2 | 2115 | static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, |
55d2375e SC |
2116 | struct vmcs12 *vmcs12) |
2117 | { | |
2118 | prepare_vmcs02_constant_state(vmx); | |
2119 | ||
2120 | vmcs_write64(VMCS_LINK_POINTER, -1ull); | |
2121 | ||
2122 | if (enable_vpid) { | |
2123 | if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) | |
2124 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); | |
2125 | else | |
2126 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); | |
2127 | } | |
2128 | } | |
2129 | ||
2130 | static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) | |
2131 | { | |
2132 | u32 exec_control, vmcs12_exec_ctrl; | |
2133 | u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); | |
2134 | ||
2135 | if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) | |
b1346ab2 | 2136 | prepare_vmcs02_early_rare(vmx, vmcs12); |
55d2375e | 2137 | |
55d2375e SC |
2138 | /* |
2139 | * PIN CONTROLS | |
2140 | */ | |
c075c3e4 | 2141 | exec_control = vmx_pin_based_exec_ctrl(vmx); |
804939ea SC |
2142 | exec_control |= (vmcs12->pin_based_vm_exec_control & |
2143 | ~PIN_BASED_VMX_PREEMPTION_TIMER); | |
55d2375e SC |
2144 | |
2145 | /* Posted interrupts setting is only taken from vmcs12. */ | |
2146 | if (nested_cpu_has_posted_intr(vmcs12)) { | |
2147 | vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; | |
2148 | vmx->nested.pi_pending = false; | |
2149 | } else { | |
2150 | exec_control &= ~PIN_BASED_POSTED_INTR; | |
2151 | } | |
3af80fec | 2152 | pin_controls_set(vmx, exec_control); |
55d2375e SC |
2153 | |
2154 | /* | |
2155 | * EXEC CONTROLS | |
2156 | */ | |
2157 | exec_control = vmx_exec_control(vmx); /* L0's desires */ | |
9dadc2f9 | 2158 | exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING; |
4e2a0bc5 | 2159 | exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING; |
55d2375e SC |
2160 | exec_control &= ~CPU_BASED_TPR_SHADOW; |
2161 | exec_control |= vmcs12->cpu_based_vm_exec_control; | |
2162 | ||
02d496cf | 2163 | vmx->nested.l1_tpr_threshold = -1; |
ca2f5466 | 2164 | if (exec_control & CPU_BASED_TPR_SHADOW) |
55d2375e | 2165 | vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); |
55d2375e | 2166 | #ifdef CONFIG_X86_64 |
ca2f5466 | 2167 | else |
55d2375e SC |
2168 | exec_control |= CPU_BASED_CR8_LOAD_EXITING | |
2169 | CPU_BASED_CR8_STORE_EXITING; | |
2170 | #endif | |
55d2375e SC |
2171 | |
2172 | /* | |
2173 | * A vmexit (to either L1 hypervisor or L0 userspace) is always needed | |
2174 | * for I/O port accesses. | |
2175 | */ | |
55d2375e | 2176 | exec_control |= CPU_BASED_UNCOND_IO_EXITING; |
de0286b7 SC |
2177 | exec_control &= ~CPU_BASED_USE_IO_BITMAPS; |
2178 | ||
2179 | /* | |
2180 | * This bit will be computed in nested_get_vmcs12_pages, because | |
2181 | * we do not have access to L1's MSR bitmap yet. For now, keep | |
2182 | * the same bit as before, hoping to avoid multiple VMWRITEs that | |
2183 | * only set/clear this bit. | |
2184 | */ | |
2185 | exec_control &= ~CPU_BASED_USE_MSR_BITMAPS; | |
2186 | exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; | |
2187 | ||
3af80fec | 2188 | exec_controls_set(vmx, exec_control); |
55d2375e SC |
2189 | |
2190 | /* | |
2191 | * SECONDARY EXEC CONTROLS | |
2192 | */ | |
2193 | if (cpu_has_secondary_exec_ctrls()) { | |
2194 | exec_control = vmx->secondary_exec_control; | |
2195 | ||
2196 | /* Take the following fields only from vmcs12 */ | |
2197 | exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | | |
2198 | SECONDARY_EXEC_ENABLE_INVPCID | | |
2199 | SECONDARY_EXEC_RDTSCP | | |
2200 | SECONDARY_EXEC_XSAVES | | |
e69e72fa | 2201 | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | |
55d2375e SC |
2202 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | |
2203 | SECONDARY_EXEC_APIC_REGISTER_VIRT | | |
2204 | SECONDARY_EXEC_ENABLE_VMFUNC); | |
2205 | if (nested_cpu_has(vmcs12, | |
2206 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) { | |
2207 | vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control & | |
2208 | ~SECONDARY_EXEC_ENABLE_PML; | |
2209 | exec_control |= vmcs12_exec_ctrl; | |
2210 | } | |
2211 | ||
2212 | /* VMCS shadowing for L2 is emulated for now */ | |
2213 | exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; | |
2214 | ||
55d2375e | 2215 | /* |
469debdb SC |
2216 | * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4() |
2217 | * will not have to rewrite the controls just for this bit. | |
55d2375e | 2218 | */ |
469debdb SC |
2219 | if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() && |
2220 | (vmcs12->guest_cr4 & X86_CR4_UMIP)) | |
2221 | exec_control |= SECONDARY_EXEC_DESC; | |
55d2375e | 2222 | |
55d2375e SC |
2223 | if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) |
2224 | vmcs_write16(GUEST_INTR_STATUS, | |
2225 | vmcs12->guest_intr_status); | |
55d2375e | 2226 | |
3af80fec | 2227 | secondary_exec_controls_set(vmx, exec_control); |
55d2375e SC |
2228 | } |
2229 | ||
2230 | /* | |
2231 | * ENTRY CONTROLS | |
2232 | * | |
2233 | * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE | |
2234 | * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate | |
2235 | * on the related bits (if supported by the CPU) in the hope that | |
2236 | * we can avoid VMWrites during vmx_set_efer(). | |
2237 | */ | |
2238 | exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) & | |
2239 | ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER; | |
2240 | if (cpu_has_load_ia32_efer()) { | |
2241 | if (guest_efer & EFER_LMA) | |
2242 | exec_control |= VM_ENTRY_IA32E_MODE; | |
2243 | if (guest_efer != host_efer) | |
2244 | exec_control |= VM_ENTRY_LOAD_IA32_EFER; | |
2245 | } | |
3af80fec | 2246 | vm_entry_controls_set(vmx, exec_control); |
55d2375e SC |
2247 | |
2248 | /* | |
2249 | * EXIT CONTROLS | |
2250 | * | |
2251 | * L2->L1 exit controls are emulated - the hardware exit is to L0 so | |
2252 | * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER | |
2253 | * bits may be modified by vmx_set_efer() in prepare_vmcs02(). | |
2254 | */ | |
2255 | exec_control = vmx_vmexit_ctrl(); | |
2256 | if (cpu_has_load_ia32_efer() && guest_efer != host_efer) | |
2257 | exec_control |= VM_EXIT_LOAD_IA32_EFER; | |
3af80fec | 2258 | vm_exit_controls_set(vmx, exec_control); |
55d2375e SC |
2259 | |
2260 | /* | |
2261 | * Interrupt/Exception Fields | |
2262 | */ | |
2263 | if (vmx->nested.nested_run_pending) { | |
2264 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | |
2265 | vmcs12->vm_entry_intr_info_field); | |
2266 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | |
2267 | vmcs12->vm_entry_exception_error_code); | |
2268 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | |
2269 | vmcs12->vm_entry_instruction_len); | |
2270 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | |
2271 | vmcs12->guest_interruptibility_info); | |
2272 | vmx->loaded_vmcs->nmi_known_unmasked = | |
2273 | !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI); | |
2274 | } else { | |
2275 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); | |
2276 | } | |
2277 | } | |
2278 | ||
b1346ab2 | 2279 | static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) |
55d2375e SC |
2280 | { |
2281 | struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; | |
2282 | ||
2283 | if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & | |
2284 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) { | |
2285 | vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); | |
2286 | vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); | |
2287 | vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector); | |
2288 | vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector); | |
2289 | vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector); | |
2290 | vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector); | |
2291 | vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector); | |
2292 | vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector); | |
2293 | vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit); | |
2294 | vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit); | |
2295 | vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit); | |
2296 | vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit); | |
2297 | vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit); | |
2298 | vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit); | |
2299 | vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit); | |
2300 | vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit); | |
2301 | vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit); | |
2302 | vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit); | |
1c6f0b47 SC |
2303 | vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes); |
2304 | vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes); | |
55d2375e SC |
2305 | vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes); |
2306 | vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes); | |
2307 | vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes); | |
2308 | vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes); | |
2309 | vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes); | |
2310 | vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes); | |
2311 | vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base); | |
2312 | vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base); | |
2313 | vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base); | |
2314 | vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base); | |
2315 | vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base); | |
2316 | vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base); | |
2317 | vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base); | |
2318 | vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base); | |
2319 | vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); | |
2320 | vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); | |
2321 | } | |
2322 | ||
2323 | if (!hv_evmcs || !(hv_evmcs->hv_clean_fields & | |
2324 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) { | |
2325 | vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); | |
2326 | vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, | |
2327 | vmcs12->guest_pending_dbg_exceptions); | |
2328 | vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); | |
2329 | vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip); | |
2330 | ||
2331 | /* | |
2332 | * L1 may access L2's PDPTRs, so save them in order to construct | |
2333 | * vmcs12. | |
2334 | */ | |
2335 | if (enable_ept) { | |
2336 | vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); | |
2337 | vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); | |
2338 | vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); | |
2339 | vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); | |
2340 | } | |
c27e5b0d SC |
2341 | |
2342 | if (kvm_mpx_supported() && vmx->nested.nested_run_pending && | |
2343 | (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) | |
2344 | vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs); | |
55d2375e SC |
2345 | } |
2346 | ||
2347 | if (nested_cpu_has_xsaves(vmcs12)) | |
2348 | vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap); | |
2349 | ||
2350 | /* | |
2351 | * Whether page-faults are trapped is determined by a combination of | |
2352 | * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. | |
2353 | * If enable_ept, L0 doesn't care about page faults and we should | |
2354 | * set all of these to L1's desires. However, if !enable_ept, L0 does | |
2355 | * care about (at least some) page faults, and because it is not easy | |
2356 | * (if at all possible?) to merge L0 and L1's desires, we simply ask | |
2357 | * to exit on each and every L2 page fault. This is done by setting | |
2358 | * MASK=MATCH=0 and (see below) EB.PF=1. | |
2359 | * Note that below we don't need special code to set EB.PF beyond the | |
2360 | * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept, | |
2361 | * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when | |
2362 | * !enable_ept, EB.PF is 1, so the "or" will always be 1. | |
2363 | */ | |
2364 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, | |
2365 | enable_ept ? vmcs12->page_fault_error_code_mask : 0); | |
2366 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, | |
2367 | enable_ept ? vmcs12->page_fault_error_code_match : 0); | |
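The comment above relies on the architectural rule that a page fault causes a VM exit iff exception-bitmap bit 14 (EB.PF) equals the result of ((error_code & PFEC_MASK) == PFEC_MATCH); with MASK = MATCH = 0 that comparison is always true, so EB.PF = 1 exits on every L2 page fault. A sketch of the decision as a hypothetical standalone predicate:

static inline bool l2_pf_causes_vmexit(u32 error_code, u32 pfec_mask,
				       u32 pfec_match, bool eb_pf)
{
	/* VM exit iff EB.PF matches the outcome of the MASK/MATCH test. */
	return eb_pf == ((error_code & pfec_mask) == pfec_match);
}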
2368 | ||
2369 | if (cpu_has_vmx_apicv()) { | |
2370 | vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0); | |
2371 | vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1); | |
2372 | vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2); | |
2373 | vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3); | |
2374 | } | |
2375 | ||
662f1d1d AL |
2376 | /* |
2377 | * Make sure the msr_autostore list is up to date before we set the | |
2378 | * count in the vmcs02. | |
2379 | */ | |
2380 | prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); | |
2381 | ||
2382 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); | |
55d2375e SC |
2383 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); |
2384 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); | |
2385 | ||
2386 | set_cr4_guest_host_mask(vmx); | |
55d2375e SC |
2387 | } |
2388 | ||
2389 | /* | |
2390 | * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested | |
2391 | * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it | |
2392 | * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2 | |
2393 | * guest in a way that will both be appropriate to L1's requests, and our | |
2394 | * needs. In addition to modifying the active vmcs (which is vmcs02), this | |
2395 | * function also has necessary side effects, like setting various | |
2396 | * vcpu->arch fields. | |
2397 | * Returns 0 on success and -EINVAL on failure, in which case the invalid | |
2398 | * state exit qualification code is assigned to entry_failure_code. | |
2399 | */ | |
2400 | static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |
2401 | u32 *entry_failure_code) | |
2402 | { | |
2403 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2404 | struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; | |
c7554efc | 2405 | bool load_guest_pdptrs_vmcs12 = false; |
55d2375e | 2406 | |
c7554efc | 2407 | if (vmx->nested.dirty_vmcs12 || hv_evmcs) { |
b1346ab2 | 2408 | prepare_vmcs02_rare(vmx, vmcs12); |
55d2375e | 2409 | vmx->nested.dirty_vmcs12 = false; |
55d2375e | 2410 | |
c7554efc SC |
2411 | load_guest_pdptrs_vmcs12 = !hv_evmcs || |
2412 | !(hv_evmcs->hv_clean_fields & | |
2413 | HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1); | |
55d2375e SC |
2414 | } |
2415 | ||
2416 | if (vmx->nested.nested_run_pending && | |
2417 | (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) { | |
2418 | kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); | |
2419 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); | |
2420 | } else { | |
2421 | kvm_set_dr(vcpu, 7, vcpu->arch.dr7); | |
2422 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); | |
2423 | } | |
3b013a29 SC |
2424 | if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || |
2425 | !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))) | |
2426 | vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); | |
55d2375e SC |
2427 | vmx_set_rflags(vcpu, vmcs12->guest_rflags); |
2428 | ||
55d2375e SC |
2429 | /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the |
2430 | * bitwise-or of what L1 wants to trap for L2, and what we want to | |
2431 | * trap. Note that CR0.TS also needs updating - we do this later. | |
2432 | */ | |
2433 | update_exception_bitmap(vcpu); | |
2434 | vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; | |
2435 | vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); | |
2436 | ||
2437 | if (vmx->nested.nested_run_pending && | |
2438 | (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) { | |
2439 | vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); | |
2440 | vcpu->arch.pat = vmcs12->guest_ia32_pat; | |
2441 | } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { | |
2442 | vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); | |
2443 | } | |
2444 | ||
2445 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); | |
2446 | ||
2447 | if (kvm_has_tsc_control) | |
2448 | decache_tsc_multiplier(vmx); | |
2449 | ||
2450 | if (enable_vpid) { | |
2451 | /* | |
2452 | * There is no direct mapping between vpid02 and vpid12: vpid02 | |
2453 | * is per-vCPU for L0 and is reused, while a change in the value | |
2454 | * of vpid12 is handled with one invvpid during nested vmentry. | |
2455 | * vpid12 is allocated by L1 for L2, so it does not influence the | |
2456 | * global bitmap (used for vpid01 and vpid02 allocation) even if | |
2457 | * L1 spawns a lot of nested vCPUs. | |
2458 | */ | |
2459 | if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) { | |
2460 | if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { | |
2461 | vmx->nested.last_vpid = vmcs12->virtual_processor_id; | |
2462 | __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false); | |
2463 | } | |
2464 | } else { | |
2465 | /* | |
2466 | * If L1 uses EPT, then L0 needs to execute INVEPT on | |
2467 | * EPTP02 instead of EPTP01. Therefore, delay TLB | |
2468 | * flush until vmcs02->eptp is fully updated by | |
727a7e27 | 2469 | * KVM_REQ_LOAD_MMU_PGD. Note that this assumes |
55d2375e | 2470 | * KVM_REQ_TLB_FLUSH is evaluated after |
727a7e27 | 2471 | * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest(). |
55d2375e SC |
2472 | */ |
2473 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | |
2474 | } | |
2475 | } | |
2476 | ||
2477 | if (nested_cpu_has_ept(vmcs12)) | |
2478 | nested_ept_init_mmu_context(vcpu); | |
55d2375e SC |
2479 | |
2480 | /* | |
2481 | * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those | |
2482 | * bits that we consider must be enabled. | |
2483 | * The CR0_READ_SHADOW is what L2 should have expected to read given | |
2484 | * the specification by L1; it's not enough to take | |
2485 | * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may have | |
2486 | * more bits set than L1 expected. | |
2487 | */ | |
2488 | vmx_set_cr0(vcpu, vmcs12->guest_cr0); | |
2489 | vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12)); | |
2490 | ||
2491 | vmx_set_cr4(vcpu, vmcs12->guest_cr4); | |
2492 | vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12)); | |
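As the comment above explains, the read shadow that L2 observes mixes two sources: bits covered by cr0_guest_host_mask read as L1's shadow value, while the remaining bits read as guest_cr0 itself. A sketch of that combination, assuming nested_read_cr0() follows the usual guest/host-mask semantics (illustrative, not this file's definition):

static inline unsigned long sketch_nested_read_cr0(const struct vmcs12 *vmcs12)
{
	unsigned long mask = vmcs12->cr0_guest_host_mask;

	/* L1-intercepted bits come from the shadow, guest-owned bits from CR0. */
	return (vmcs12->guest_cr0 & ~mask) | (vmcs12->cr0_read_shadow & mask);
}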
2493 | ||
2494 | vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); | |
2495 | /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ | |
2496 | vmx_set_efer(vcpu, vcpu->arch.efer); | |
2497 | ||
2498 | /* | |
2499 | * Guest state is invalid and unrestricted guest is disabled, | |
2500 | * which means L1 attempted VMEntry to L2 with invalid state. | |
2501 | * Fail the VMEntry. | |
2502 | */ | |
2503 | if (vmx->emulation_required) { | |
2504 | *entry_failure_code = ENTRY_FAIL_DEFAULT; | |
c80add0f | 2505 | return -EINVAL; |
55d2375e SC |
2506 | } |
2507 | ||
2508 | /* Load vmcs12->guest_cr3, using nested EPT or shadow page tables as appropriate. */ | |
2509 | if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), | |
2510 | entry_failure_code)) | |
c80add0f | 2511 | return -EINVAL; |
55d2375e | 2512 | |
04f11ef4 SC |
2513 | /* |
2514 | * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12 | |
2515 | * on nested VM-Exit, which can occur without actually running L2 and | |
727a7e27 | 2516 | * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with |
04f11ef4 SC |
2517 | * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the |
2518 | * transition to HLT instead of running L2. | |
2519 | */ | |
2520 | if (enable_ept) | |
2521 | vmcs_writel(GUEST_CR3, vmcs12->guest_cr3); | |
2522 | ||
c7554efc SC |
2523 | /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */ |
2524 | if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) && | |
2525 | is_pae_paging(vcpu)) { | |
2526 | vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0); | |
2527 | vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); | |
2528 | vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); | |
2529 | vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); | |
2530 | } | |
2531 | ||
55d2375e SC |
2532 | if (!enable_ept) |
2533 | vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; | |
2534 | ||
71f73470 | 2535 | if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && |
d1968421 OU |
2536 | WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, |
2537 | vmcs12->guest_ia32_perf_global_ctrl))) | |
71f73470 OU |
2538 | return -EINVAL; |
2539 | ||
e9c16c78 PB |
2540 | kvm_rsp_write(vcpu, vmcs12->guest_rsp); |
2541 | kvm_rip_write(vcpu, vmcs12->guest_rip); | |
55d2375e SC |
2542 | return 0; |
2543 | } | |
2544 | ||
2545 | static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12) | |
2546 | { | |
5497b955 SC |
2547 | if (CC(!nested_cpu_has_nmi_exiting(vmcs12) && |
2548 | nested_cpu_has_virtual_nmis(vmcs12))) | |
55d2375e SC |
2549 | return -EINVAL; |
2550 | ||
5497b955 | 2551 | if (CC(!nested_cpu_has_virtual_nmis(vmcs12) && |
4e2a0bc5 | 2552 | nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING))) |
55d2375e SC |
2553 | return -EINVAL; |
2554 | ||
2555 | return 0; | |
2556 | } | |
2557 | ||
ac6389ab | 2558 | static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) |
55d2375e SC |
2559 | { |
2560 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2561 | int maxphyaddr = cpuid_maxphyaddr(vcpu); | |
2562 | ||
2563 | /* Check for memory type validity */ | |
ac6389ab | 2564 | switch (new_eptp & VMX_EPTP_MT_MASK) { |
55d2375e | 2565 | case VMX_EPTP_MT_UC: |
5497b955 | 2566 | if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) |
55d2375e SC |
2567 | return false; |
2568 | break; | |
2569 | case VMX_EPTP_MT_WB: | |
5497b955 | 2570 | if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) |
55d2375e SC |
2571 | return false; |
2572 | break; | |
2573 | default: | |
2574 | return false; | |
2575 | } | |
2576 | ||
bb1fcc70 | 2577 | /* Page-walk levels validity. */ |
ac6389ab | 2578 | switch (new_eptp & VMX_EPTP_PWL_MASK) { |
bb1fcc70 SC |
2579 | case VMX_EPTP_PWL_5: |
2580 | if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) | |
2581 | return false; | |
2582 | break; | |
2583 | case VMX_EPTP_PWL_4: | |
2584 | if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) | |
2585 | return false; | |
2586 | break; | |
2587 | default: | |
55d2375e | 2588 | return false; |
bb1fcc70 | 2589 | } |
55d2375e SC |
2590 | |
2591 | /* Reserved bits should not be set */ | |
ac6389ab | 2592 | if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f))) |
55d2375e SC |
2593 | return false; |
2594 | ||
2595 | /* AD, if set, should be supported */ | |
ac6389ab | 2596 | if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) { |
5497b955 | 2597 | if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) |
55d2375e SC |
2598 | return false; |
2599 | } | |
2600 | ||
2601 | return true; | |
2602 | } | |
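The checks above walk the architectural EPTP layout: bits 2:0 are the memory type, bits 5:3 the page-walk length minus one, bit 6 the accessed/dirty enable, bits 11:7 are treated as reserved (the `(new_eptp >> 7) & 0x1f` test), and the upper bits hold the root table's physical address. A decode sketch with a hypothetical struct, just to make the field positions explicit:

struct eptp_sketch {
	unsigned int mem_type;	/* bits 2:0, e.g. 0 = UC, 6 = WB */
	unsigned int pwl;	/* bits 5:3, page-walk levels minus one (3 = 4-level, 4 = 5-level) */
	bool ad_enable;		/* bit 6, VMX_EPTP_AD_ENABLE_BIT */
	u64 root_pfn;		/* bits 63:12, page-frame number of the EPT root */
};

static inline struct eptp_sketch decode_eptp(u64 eptp)
{
	return (struct eptp_sketch) {
		.mem_type  = eptp & 0x7,
		.pwl	   = (eptp >> 3) & 0x7,
		.ad_enable = eptp & (1ULL << 6),
		.root_pfn  = eptp >> 12,
	};
}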
2603 | ||
461b4ba4 KS |
2604 | /* |
2605 | * Checks related to VM-Execution Control Fields | |
2606 | */ | |
2607 | static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, | |
2608 | struct vmcs12 *vmcs12) | |
55d2375e SC |
2609 | { |
2610 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
55d2375e | 2611 | |
5497b955 SC |
2612 | if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control, |
2613 | vmx->nested.msrs.pinbased_ctls_low, | |
2614 | vmx->nested.msrs.pinbased_ctls_high)) || | |
2615 | CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, | |
2616 | vmx->nested.msrs.procbased_ctls_low, | |
2617 | vmx->nested.msrs.procbased_ctls_high))) | |
461b4ba4 | 2618 | return -EINVAL; |
55d2375e | 2619 | |
461b4ba4 | 2620 | if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) && |
5497b955 SC |
2621 | CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control, |
2622 | vmx->nested.msrs.secondary_ctls_low, | |
2623 | vmx->nested.msrs.secondary_ctls_high))) | |
461b4ba4 KS |
2624 | return -EINVAL; |
2625 | ||
5497b955 | 2626 | if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) || |
461b4ba4 KS |
2627 | nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) || |
2628 | nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) || | |
2629 | nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) || | |
2630 | nested_vmx_check_apic_access_controls(vcpu, vmcs12) || | |
2631 | nested_vmx_check_apicv_controls(vcpu, vmcs12) || | |
2632 | nested_vmx_check_nmi_controls(vmcs12) || | |
2633 | nested_vmx_check_pml_controls(vcpu, vmcs12) || | |
2634 | nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) || | |
2635 | nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) || | |
2636 | nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) || | |
5497b955 | 2637 | CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) |
461b4ba4 KS |
2638 | return -EINVAL; |
2639 | ||
bc441211 SC |
2640 | if (!nested_cpu_has_preemption_timer(vmcs12) && |
2641 | nested_cpu_has_save_preemption_timer(vmcs12)) | |
2642 | return -EINVAL; | |
2643 | ||
461b4ba4 | 2644 | if (nested_cpu_has_ept(vmcs12) && |
ac6389ab | 2645 | CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer))) |
461b4ba4 | 2646 | return -EINVAL; |
55d2375e SC |
2647 | |
2648 | if (nested_cpu_has_vmfunc(vmcs12)) { | |
5497b955 SC |
2649 | if (CC(vmcs12->vm_function_control & |
2650 | ~vmx->nested.msrs.vmfunc_controls)) | |
461b4ba4 | 2651 | return -EINVAL; |
55d2375e SC |
2652 | |
2653 | if (nested_cpu_has_eptp_switching(vmcs12)) { | |
5497b955 SC |
2654 | if (CC(!nested_cpu_has_ept(vmcs12)) || |
2655 | CC(!page_address_valid(vcpu, vmcs12->eptp_list_address))) | |
461b4ba4 | 2656 | return -EINVAL; |
55d2375e SC |
2657 | } |
2658 | } | |
2659 | ||
461b4ba4 KS |
2660 | return 0; |
2661 | } | |
2662 | ||
61446ba7 KS |
2663 | /* |
2664 | * Checks related to VM-Exit Control Fields | |
2665 | */ | |
2666 | static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu, | |
2667 | struct vmcs12 *vmcs12) | |
2668 | { | |
2669 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2670 | ||
5497b955 SC |
2671 | if (CC(!vmx_control_verify(vmcs12->vm_exit_controls, |
2672 | vmx->nested.msrs.exit_ctls_low, | |
2673 | vmx->nested.msrs.exit_ctls_high)) || | |
2674 | CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))) | |
61446ba7 KS |
2675 | return -EINVAL; |
2676 | ||
2677 | return 0; | |
2678 | } | |
2679 | ||
5fbf9634 KS |
2680 | /* |
2681 | * Checks related to VM-Entry Control Fields | |
2682 | */ | |
2683 | static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu, | |
2684 | struct vmcs12 *vmcs12) | |
461b4ba4 KS |
2685 | { |
2686 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
55d2375e | 2687 | |
5497b955 SC |
2688 | if (CC(!vmx_control_verify(vmcs12->vm_entry_controls, |
2689 | vmx->nested.msrs.entry_ctls_low, | |
2690 | vmx->nested.msrs.entry_ctls_high))) | |
5fbf9634 | 2691 | return -EINVAL; |
55d2375e SC |
2692 | |
2693 | /* | |
2694 | * From the Intel SDM, volume 3: | |
2695 | * Fields relevant to VM-entry event injection must be set properly. | |
2696 | * These fields are the VM-entry interruption-information field, the | |
2697 | * VM-entry exception error code, and the VM-entry instruction length. | |
2698 | */ | |
2699 | if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) { | |
2700 | u32 intr_info = vmcs12->vm_entry_intr_info_field; | |
2701 | u8 vector = intr_info & INTR_INFO_VECTOR_MASK; | |
2702 | u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK; | |
2703 | bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK; | |
2704 | bool should_have_error_code; | |
2705 | bool urg = nested_cpu_has2(vmcs12, | |
2706 | SECONDARY_EXEC_UNRESTRICTED_GUEST); | |
2707 | bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE; | |
2708 | ||
2709 | /* VM-entry interruption-info field: interruption type */ | |
5497b955 SC |
2710 | if (CC(intr_type == INTR_TYPE_RESERVED) || |
2711 | CC(intr_type == INTR_TYPE_OTHER_EVENT && | |
2712 | !nested_cpu_supports_monitor_trap_flag(vcpu))) | |
5fbf9634 | 2713 | return -EINVAL; |
55d2375e SC |
2714 | |
2715 | /* VM-entry interruption-info field: vector */ | |
5497b955 SC |
2716 | if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) || |
2717 | CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) || | |
2718 | CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) | |
5fbf9634 | 2719 | return -EINVAL; |
55d2375e SC |
2720 | |
2721 | /* VM-entry interruption-info field: deliver error code */ | |
2722 | should_have_error_code = | |
2723 | intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode && | |
2724 | x86_exception_has_error_code(vector); | |
5497b955 | 2725 | if (CC(has_error_code != should_have_error_code)) |
5fbf9634 | 2726 | return -EINVAL; |
55d2375e SC |
2727 | |
2728 | /* VM-entry exception error code */ | |
5497b955 | 2729 | if (CC(has_error_code && |
567926cc | 2730 | vmcs12->vm_entry_exception_error_code & GENMASK(31, 16))) |
5fbf9634 | 2731 | return -EINVAL; |
55d2375e SC |
2732 | |
2733 | /* VM-entry interruption-info field: reserved bits */ | |
5497b955 | 2734 | if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK)) |
5fbf9634 | 2735 | return -EINVAL; |
55d2375e SC |
2736 | |
2737 | /* VM-entry instruction length */ | |
2738 | switch (intr_type) { | |
2739 | case INTR_TYPE_SOFT_EXCEPTION: | |
2740 | case INTR_TYPE_SOFT_INTR: | |
2741 | case INTR_TYPE_PRIV_SW_EXCEPTION: | |
5497b955 SC |
2742 | if (CC(vmcs12->vm_entry_instruction_len > 15) || |
2743 | CC(vmcs12->vm_entry_instruction_len == 0 && | |
2744 | CC(!nested_cpu_has_zero_length_injection(vcpu)))) | |
5fbf9634 | 2745 | return -EINVAL; |
55d2375e SC |
2746 | } |
2747 | } | |
2748 | ||
5fbf9634 KS |
2749 | if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12)) |
2750 | return -EINVAL; | |
2751 | ||
2752 | return 0; | |
2753 | } | |
2754 | ||
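As an aside, the deliver-error-code rule enforced above can be restated as a minimal standalone sketch (not kernel code; the helper name and hard-coded vector list below are illustrative, mirroring what x86_exception_has_error_code() is expected to report): only #DF, #TS, #NP, #SS, #GP, #PF and #AC architecturally push an error code, and only when the guest will be in protected mode.

    /* Illustrative sketch of the deliver-error-code consistency rule. */
    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS),
     * 13 (#GP), 14 (#PF) and 17 (#AC) carry an error code. */
    static bool vector_pushes_error_code(uint8_t vector)
    {
            const uint32_t mask = (1u << 8) | (1u << 10) | (1u << 11) |
                                  (1u << 12) | (1u << 13) | (1u << 14) |
                                  (1u << 17);

            return vector < 32 && (mask & (1u << vector));
    }

    /* Injecting a hardware exception must set the "deliver error code" bit
     * if and only if the vector architecturally has one and the guest will
     * be in protected mode. */
    static bool error_code_bit_consistent(uint8_t vector, bool has_error_code,
                                          bool prot_mode)
    {
            bool should_have = prot_mode && vector_pushes_error_code(vector);

            return has_error_code == should_have;
    }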
5478ba34 SC |
2755 | static int nested_vmx_check_controls(struct kvm_vcpu *vcpu, |
2756 | struct vmcs12 *vmcs12) | |
2757 | { | |
2758 | if (nested_check_vm_execution_controls(vcpu, vmcs12) || | |
2759 | nested_check_vm_exit_controls(vcpu, vmcs12) || | |
2760 | nested_check_vm_entry_controls(vcpu, vmcs12)) | |
98d9e858 | 2761 | return -EINVAL; |
5478ba34 | 2762 | |
a8350231 VK |
2763 | if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) |
2764 | return nested_evmcs_check_controls(vmcs12); | |
2765 | ||
5478ba34 SC |
2766 | return 0; |
2767 | } | |
2768 | ||
98d9e858 PB |
2769 | static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, |
2770 | struct vmcs12 *vmcs12) | |
5fbf9634 KS |
2771 | { |
2772 | bool ia32e; | |
2773 | ||
5497b955 SC |
2774 | if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || |
2775 | CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || | |
2776 | CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3))) | |
254b2f3b | 2777 | return -EINVAL; |
711eff3a | 2778 | |
5497b955 SC |
2779 | if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || |
2780 | CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu))) | |
711eff3a KS |
2781 | return -EINVAL; |
2782 | ||
f6b0db1f | 2783 | if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) && |
5497b955 | 2784 | CC(!kvm_pat_valid(vmcs12->host_ia32_pat))) |
f6b0db1f KS |
2785 | return -EINVAL; |
2786 | ||
c547cb6f OU |
2787 | if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) && |
2788 | CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), | |
2789 | vmcs12->host_ia32_perf_global_ctrl))) | |
2790 | return -EINVAL; | |
2791 | ||
fd3edd4a PB |
2792 | #ifdef CONFIG_X86_64 |
2793 | ia32e = !!(vcpu->arch.efer & EFER_LMA); | |
2794 | #else | |
2795 | ia32e = false; | |
2796 | #endif | |
2797 | ||
2798 | if (ia32e) { | |
2799 | if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) || | |
2800 | CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) | |
2801 | return -EINVAL; | |
2802 | } else { | |
2803 | if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) || | |
2804 | CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) || | |
2805 | CC(vmcs12->host_cr4 & X86_CR4_PCIDE) || | |
2806 | CC((vmcs12->host_rip) >> 32)) | |
2807 | return -EINVAL; | |
2808 | } | |
1ef23e1f | 2809 | |
5497b955 SC |
2810 | if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || |
2811 | CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || | |
2812 | CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || | |
2813 | CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || | |
2814 | CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || | |
2815 | CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || | |
2816 | CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) || | |
2817 | CC(vmcs12->host_cs_selector == 0) || | |
2818 | CC(vmcs12->host_tr_selector == 0) || | |
2819 | CC(vmcs12->host_ss_selector == 0 && !ia32e)) | |
1ef23e1f KS |
2820 | return -EINVAL; |
2821 | ||
5497b955 SC |
2822 | if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) || |
2823 | CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) || | |
2824 | CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) || | |
2825 | CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) || | |
fd3edd4a PB |
2826 | CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) || |
2827 | CC(is_noncanonical_address(vmcs12->host_rip, vcpu))) | |
5845038c | 2828 | return -EINVAL; |
1ef23e1f | 2829 | |
5fbf9634 KS |
2830 | /* |
2831 | * If the load IA32_EFER VM-exit control is 1, bits reserved in the | |
2832 | * IA32_EFER MSR must be 0 in the field for that register. In addition, | |
2833 | * the values of the LMA and LME bits in the field must each be that of | |
2834 | * the host address-space size VM-exit control. | |
2835 | */ | |
2836 | if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) { | |
5497b955 SC |
2837 | if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) || |
2838 | CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) || | |
2839 | CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))) | |
254b2f3b | 2840 | return -EINVAL; |
5fbf9634 KS |
2841 | } |
2842 | ||
55d2375e SC |
2843 | return 0; |
2844 | } | |
2845 | ||
2846 | static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu, | |
2847 | struct vmcs12 *vmcs12) | |
2848 | { | |
88925305 | 2849 | int r = 0; |
55d2375e | 2850 | struct vmcs12 *shadow; |
88925305 | 2851 | struct kvm_host_map map; |
55d2375e SC |
2852 | |
2853 | if (vmcs12->vmcs_link_pointer == -1ull) | |
2854 | return 0; | |
2855 | ||
5497b955 | 2856 | if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))) |
55d2375e SC |
2857 | return -EINVAL; |
2858 | ||
5497b955 | 2859 | if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) |
55d2375e SC |
2860 | return -EINVAL; |
2861 | ||
88925305 KA |
2862 | shadow = map.hva; |
2863 | ||
5497b955 SC |
2864 | if (CC(shadow->hdr.revision_id != VMCS12_REVISION) || |
2865 | CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))) | |
55d2375e | 2866 | r = -EINVAL; |
88925305 KA |
2867 | |
2868 | kvm_vcpu_unmap(vcpu, &map, false); | |
55d2375e SC |
2869 | return r; |
2870 | } | |
2871 | ||
9c3e922b SC |
2872 | /* |
2873 | * Checks related to Guest Non-register State | |
2874 | */ | |
2875 | static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12) | |
2876 | { | |
5497b955 SC |
2877 | if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE && |
2878 | vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)) | |
9c3e922b SC |
2879 | return -EINVAL; |
2880 | ||
2881 | return 0; | |
2882 | } | |
2883 | ||
5478ba34 SC |
2884 | static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, |
2885 | struct vmcs12 *vmcs12, | |
2886 | u32 *exit_qual) | |
55d2375e SC |
2887 | { |
2888 | bool ia32e; | |
2889 | ||
2890 | *exit_qual = ENTRY_FAIL_DEFAULT; | |
2891 | ||
5497b955 SC |
2892 | if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) || |
2893 | CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))) | |
c80add0f | 2894 | return -EINVAL; |
55d2375e | 2895 | |
b91991bf KS |
2896 | if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) && |
2897 | CC(!kvm_dr7_valid(vmcs12->guest_dr7))) | |
2898 | return -EINVAL; | |
2899 | ||
de2bc2bf | 2900 | if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) && |
5497b955 | 2901 | CC(!kvm_pat_valid(vmcs12->guest_ia32_pat))) |
c80add0f | 2902 | return -EINVAL; |
55d2375e SC |
2903 | |
2904 | if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) { | |
2905 | *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR; | |
c80add0f | 2906 | return -EINVAL; |
55d2375e SC |
2907 | } |
2908 | ||
bfc6ad6a OU |
2909 | if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) && |
2910 | CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu), | |
2911 | vmcs12->guest_ia32_perf_global_ctrl))) | |
2912 | return -EINVAL; | |
2913 | ||
55d2375e SC |
2914 | /* |
2915 | * If the load IA32_EFER VM-entry control is 1, the following checks | |
2916 | * are performed on the field for the IA32_EFER MSR: | |
2917 | * - Bits reserved in the IA32_EFER MSR must be 0. | |
2918 | * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of | |
2920 | * the IA-32e mode guest VM-entry control. It must also be identical | |
2920 | * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to | |
2921 | * CR0.PG) is 1. | |
2922 | */ | |
2923 | if (to_vmx(vcpu)->nested.nested_run_pending && | |
2924 | (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { | |
2925 | ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; | |
5497b955 SC |
2926 | if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || |
2927 | CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || | |
2928 | CC(((vmcs12->guest_cr0 & X86_CR0_PG) && | |
2929 | ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))) | |
c80add0f | 2930 | return -EINVAL; |
55d2375e SC |
2931 | } |
2932 | ||
2933 | if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) && | |
5497b955 SC |
2934 | (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) || |
2935 | CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))) | |
c80add0f | 2936 | return -EINVAL; |
55d2375e | 2937 | |
9c3e922b | 2938 | if (nested_check_guest_non_reg_state(vmcs12)) |
c80add0f | 2939 | return -EINVAL; |
55d2375e SC |
2940 | |
2941 | return 0; | |
2942 | } | |
2943 | ||
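For concreteness, the EFER/IA-32e-mode rule checked above can be written as a small predicate. This is a hedged, standalone sketch under illustrative names (not KVM code, and the reserved-bit check handled by kvm_valid_efer() is omitted): EFER.LMA must mirror the "IA-32e mode guest" VM-entry control, and EFER.LME must mirror it too whenever CR0.PG is set.

    /* Illustrative predicate for the guest IA32_EFER consistency check. */
    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_LME (1ull << 8)
    #define EFER_LMA (1ull << 10)
    #define CR0_PG   (1ull << 31)

    static bool guest_efer_consistent(bool ia32e_mode_guest, uint64_t efer,
                                      uint64_t cr0)
    {
            if (ia32e_mode_guest != !!(efer & EFER_LMA))
                    return false;   /* LMA must mirror the entry control */
            if ((cr0 & CR0_PG) && ia32e_mode_guest != !!(efer & EFER_LME))
                    return false;   /* LME must mirror it when paging is on */
            return true;
    }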
453eafbe | 2944 | static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu) |
55d2375e SC |
2945 | { |
2946 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
2947 | unsigned long cr3, cr4; | |
f1727b49 | 2948 | bool vm_fail; |
55d2375e SC |
2949 | |
2950 | if (!nested_early_check) | |
2951 | return 0; | |
2952 | ||
2953 | if (vmx->msr_autoload.host.nr) | |
2954 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); | |
2955 | if (vmx->msr_autoload.guest.nr) | |
2956 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); | |
2957 | ||
2958 | preempt_disable(); | |
2959 | ||
2960 | vmx_prepare_switch_to_guest(vcpu); | |
2961 | ||
2962 | /* | |
2963 | * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS, | |
2964 | * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to | |
49f933d4 | 2965 | * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e. |
55d2375e SC |
2966 | * there is no need to preserve other bits or save/restore the field. |
2967 | */ | |
2968 | vmcs_writel(GUEST_RFLAGS, 0); | |
2969 | ||
55d2375e SC |
2970 | cr3 = __get_current_cr3_fast(); |
2971 | if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { | |
2972 | vmcs_writel(HOST_CR3, cr3); | |
2973 | vmx->loaded_vmcs->host_state.cr3 = cr3; | |
2974 | } | |
2975 | ||
2976 | cr4 = cr4_read_shadow(); | |
2977 | if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { | |
2978 | vmcs_writel(HOST_CR4, cr4); | |
2979 | vmx->loaded_vmcs->host_state.cr4 = cr4; | |
2980 | } | |
2981 | ||
55d2375e | 2982 | asm( |
453eafbe | 2983 | "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */ |
5a878160 SC |
2984 | "cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" |
2985 | "je 1f \n\t" | |
fbda0fd3 | 2986 | __ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t" |
5a878160 SC |
2987 | "mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t" |
2988 | "1: \n\t" | |
453eafbe | 2989 | "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */ |
55d2375e SC |
2990 | |
2991 | /* Check if vmlaunch or vmresume is needed */ | |
74dfa278 | 2992 | "cmpb $0, %c[launched](%[loaded_vmcs])\n\t" |
453eafbe | 2993 | |
f1727b49 SC |
2994 | /* |
2995 | * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set | |
2996 | * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail | |
2997 | * Valid. vmx_vmenter() directly "returns" RFLAGS, and so the | |
bbc0b823 | 2998 | * result of VM-Enter is captured via CC_{SET,OUT} to vm_fail. |
f1727b49 | 2999 | */ |
453eafbe SC |
3000 | "call vmx_vmenter\n\t" |
3001 | ||
bbc0b823 SC |
3002 | CC_SET(be) |
3003 | : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail) | |
5a878160 | 3004 | : [HOST_RSP]"r"((unsigned long)HOST_RSP), |
74dfa278 SC |
3005 | [loaded_vmcs]"r"(vmx->loaded_vmcs), |
3006 | [launched]"i"(offsetof(struct loaded_vmcs, launched)), | |
5a878160 | 3007 | [host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)), |
453eafbe | 3008 | [wordsize]"i"(sizeof(ulong)) |
5a253552 | 3009 | : "memory" |
55d2375e SC |
3010 | ); |
3011 | ||
55d2375e SC |
3012 | if (vmx->msr_autoload.host.nr) |
3013 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); | |
3014 | if (vmx->msr_autoload.guest.nr) | |
3015 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); | |
3016 | ||
f1727b49 | 3017 | if (vm_fail) { |
380e0055 SC |
3018 | u32 error = vmcs_read32(VM_INSTRUCTION_ERROR); |
3019 | ||
541e886f | 3020 | preempt_enable(); |
380e0055 SC |
3021 | |
3022 | trace_kvm_nested_vmenter_failed( | |
3023 | "early hardware check VM-instruction error: ", error); | |
3024 | WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD); | |
55d2375e SC |
3025 | return 1; |
3026 | } | |
3027 | ||
3028 | /* | |
3029 | * VMExit clears RFLAGS.IF and DR7, even on a consistency check. | |
3030 | */ | |
3031 | local_irq_enable(); | |
3032 | if (hw_breakpoint_active()) | |
3033 | set_debugreg(__this_cpu_read(cpu_dr7), 7); | |
541e886f | 3034 | preempt_enable(); |
55d2375e SC |
3035 | |
3036 | /* | |
3037 | * A non-failing VMEntry means we somehow entered guest mode with | |
3038 | * an illegal RIP, and that's just the tip of the iceberg. There | |
3039 | * is no telling what memory has been modified or what state has | |
3040 | * been exposed to unknown code. Hitting this all but guarantees | |
3041 | * a (very critical) hardware issue. | |
3042 | */ | |
3043 | WARN_ON(!(vmcs_read32(VM_EXIT_REASON) & | |
3044 | VMX_EXIT_REASONS_FAILED_VMENTRY)); | |
3045 | ||
3046 | return 0; | |
3047 | } | |
55d2375e | 3048 | |
671ddc70 | 3049 | static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu) |
55d2375e SC |
3050 | { |
3051 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
3052 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
96c66e87 | 3053 | struct kvm_host_map *map; |
55d2375e SC |
3054 | struct page *page; |
3055 | u64 hpa; | |
3056 | ||
e942dbf8 VK |
3057 | /* |
3058 | * hv_evmcs may end up being not mapped after migration (when | |
3059 | * L2 was running), map it here to make sure vmcs12 changes are | |
3060 | * properly reflected. | |
3061 | */ | |
b6a0653a VK |
3062 | if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) { |
3063 | enum nested_evmptrld_status evmptrld_status = | |
3064 | nested_vmx_handle_enlightened_vmptrld(vcpu, false); | |
3065 | ||
3066 | if (evmptrld_status == EVMPTRLD_VMFAIL || | |
3067 | evmptrld_status == EVMPTRLD_ERROR) { | |
3068 | pr_debug_ratelimited("%s: enlightened vmptrld failed\n", | |
3069 | __func__); | |
3070 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
3071 | vcpu->run->internal.suberror = | |
3072 | KVM_INTERNAL_ERROR_EMULATION; | |
3073 | vcpu->run->internal.ndata = 0; | |
3074 | return false; | |
3075 | } | |
3076 | } | |
e942dbf8 | 3077 | |
55d2375e SC |
3078 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { |
3079 | /* | |
3080 | * Translate L1 physical address to host physical | |
3081 | * address for vmcs02. Keep the page pinned, so this | |
3082 | * physical address remains valid. We keep a reference | |
3083 | * to it so we can release it later. | |
3084 | */ | |
3085 | if (vmx->nested.apic_access_page) { /* shouldn't happen */ | |
b11494bc | 3086 | kvm_release_page_clean(vmx->nested.apic_access_page); |
55d2375e SC |
3087 | vmx->nested.apic_access_page = NULL; |
3088 | } | |
3089 | page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr); | |
55d2375e SC |
3090 | if (!is_error_page(page)) { |
3091 | vmx->nested.apic_access_page = page; | |
3092 | hpa = page_to_phys(vmx->nested.apic_access_page); | |
3093 | vmcs_write64(APIC_ACCESS_ADDR, hpa); | |
3094 | } else { | |
671ddc70 JM |
3095 | pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n", |
3096 | __func__); | |
3097 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
3098 | vcpu->run->internal.suberror = | |
3099 | KVM_INTERNAL_ERROR_EMULATION; | |
3100 | vcpu->run->internal.ndata = 0; | |
3101 | return false; | |
55d2375e SC |
3102 | } |
3103 | } | |
3104 | ||
3105 | if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { | |
96c66e87 | 3106 | map = &vmx->nested.virtual_apic_map; |
55d2375e | 3107 | |
96c66e87 KA |
3108 | if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { |
3109 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); | |
69090810 PB |
3110 | } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) && |
3111 | nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) && | |
3112 | !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { | |
3113 | /* | |
3114 | * The processor will never use the TPR shadow, simply | |
3115 | * clear the bit from the execution control. Such a | |
3116 | * configuration is useless, but it happens in tests. | |
3117 | * For any other configuration, failing the vm entry is | |
3118 | * _not_ what the processor does but it's basically the | |
3119 | * only possibility we have. | |
3120 | */ | |
2183f564 | 3121 | exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); |
69090810 | 3122 | } else { |
ca2f5466 SC |
3123 | /* |
3124 | * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to | |
3125 | * force VM-Entry to fail. | |
3126 | */ | |
3127 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull); | |
55d2375e SC |
3128 | } |
3129 | } | |
3130 | ||
3131 | if (nested_cpu_has_posted_intr(vmcs12)) { | |
3278e049 KA |
3132 | map = &vmx->nested.pi_desc_map; |
3133 | ||
3134 | if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { | |
3135 | vmx->nested.pi_desc = | |
3136 | (struct pi_desc *)(((void *)map->hva) + | |
3137 | offset_in_page(vmcs12->posted_intr_desc_addr)); | |
3138 | vmcs_write64(POSTED_INTR_DESC_ADDR, | |
3139 | pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); | |
55d2375e | 3140 | } |
55d2375e SC |
3141 | } |
3142 | if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12)) | |
2183f564 | 3143 | exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); |
55d2375e | 3144 | else |
2183f564 | 3145 | exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); |
671ddc70 | 3146 | return true; |
55d2375e SC |
3147 | } |
3148 | ||
3149 | /* | |
3150 | * Intel's VMX Instruction Reference specifies a common set of prerequisites | |
3151 | * for running VMX instructions (except VMXON, whose prerequisites are | |
3152 | * slightly different). It also specifies what exception to inject otherwise. | |
3153 | * Note that many of these exceptions have priority over VM exits, so they | |
3154 | * don't have to be checked again here. | |
3155 | */ | |
3156 | static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) | |
3157 | { | |
3158 | if (!to_vmx(vcpu)->nested.vmxon) { | |
3159 | kvm_queue_exception(vcpu, UD_VECTOR); | |
3160 | return 0; | |
3161 | } | |
3162 | ||
3163 | if (vmx_get_cpl(vcpu)) { | |
3164 | kvm_inject_gp(vcpu, 0); | |
3165 | return 0; | |
3166 | } | |
3167 | ||
3168 | return 1; | |
3169 | } | |
3170 | ||
3171 | static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu) | |
3172 | { | |
3173 | u8 rvi = vmx_get_rvi(); | |
3174 | u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI); | |
3175 | ||
3176 | return ((rvi & 0xf0) > (vppr & 0xf0)); | |
3177 | } | |
3178 | ||
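The comparison in vmx_has_apicv_interrupt() operates on 16-vector priority classes: bits 7:4 of the requesting vector (RVI) and of the virtual PPR select the class, and a pending virtual interrupt is deliverable only when its class is strictly above the processor-priority class. A short standalone illustration (not kernel code, made-up values):

    /* Priority-class comparison used for virtual interrupt delivery:
     * class = vector >> 4, i.e. compare (value & 0xf0) directly. */
    #include <stdbool.h>
    #include <stdint.h>

    static bool apicv_interrupt_deliverable(uint8_t rvi, uint8_t vppr)
    {
            return (rvi & 0xf0) > (vppr & 0xf0);
    }

    /* Example: RVI = 0x51 (class 5) vs. VPPR = 0x4f (class 4) -> deliverable;
     * RVI = 0x41 vs. VPPR = 0x4f share class 4 -> not deliverable. */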
3179 | static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |
3180 | struct vmcs12 *vmcs12); | |
3181 | ||
3182 | /* | |
3183 | * If from_vmentry is false, this is being called from state restore (either RSM | |
3184 | * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume. | |
671ddc70 JM |
3185 | * |
3186 | * Returns: | |
463bfeee ML |
3187 | * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode |
3188 | * NVMX_VMENTRY_VMFAIL: Consistency check VMFail | |
3189 | * NVMX_VMENTRY_VMEXIT: Consistency check VMExit | |
3190 | * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error | |
55d2375e | 3191 | */ |
671ddc70 JM |
3192 | enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, |
3193 | bool from_vmentry) | |
55d2375e SC |
3194 | { |
3195 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3196 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
3197 | bool evaluate_pending_interrupts; | |
3198 | u32 exit_reason = EXIT_REASON_INVALID_STATE; | |
3199 | u32 exit_qual; | |
3200 | ||
2183f564 | 3201 | evaluate_pending_interrupts = exec_controls_get(vmx) & |
4e2a0bc5 | 3202 | (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING); |
55d2375e SC |
3203 | if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu)) |
3204 | evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu); | |
3205 | ||
3206 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) | |
3207 | vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | |
3208 | if (kvm_mpx_supported() && | |
3209 | !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)) | |
3210 | vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); | |
3211 | ||
f087a029 SC |
3212 | /* |
3213 | * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and* | |
3214 | * nested early checks are disabled. In the event of a "late" VM-Fail, | |
3215 | * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its | |
3216 | * software model to the pre-VMEntry host state. When EPT is disabled, | |
3217 | * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes | |
3218 | * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing | |
3219 | * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to | |
3220 | * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested | |
3221 | * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is | |
3222 | * guaranteed to be overwritten with a shadow CR3 prior to re-entering | |
3223 | * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as | |
3224 | * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks | |
3225 | * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail | |
3226 | * path would need to manually save/restore vmcs01.GUEST_CR3. | |
3227 | */ | |
3228 | if (!enable_ept && !nested_early_check) | |
3229 | vmcs_writel(GUEST_CR3, vcpu->arch.cr3); | |
3230 | ||
55d2375e SC |
3231 | vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); |
3232 | ||
3233 | prepare_vmcs02_early(vmx, vmcs12); | |
3234 | ||
3235 | if (from_vmentry) { | |
671ddc70 JM |
3236 | if (unlikely(!nested_get_vmcs12_pages(vcpu))) |
3237 | return NVMX_VMENTRY_KVM_INTERNAL_ERROR; | |
55d2375e SC |
3238 | |
3239 | if (nested_vmx_check_vmentry_hw(vcpu)) { | |
3240 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | |
671ddc70 | 3241 | return NVMX_VMENTRY_VMFAIL; |
55d2375e SC |
3242 | } |
3243 | ||
5478ba34 | 3244 | if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual)) |
55d2375e SC |
3245 | goto vmentry_fail_vmexit; |
3246 | } | |
3247 | ||
3248 | enter_guest_mode(vcpu); | |
5e3d394f | 3249 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) |
55d2375e SC |
3250 | vcpu->arch.tsc_offset += vmcs12->tsc_offset; |
3251 | ||
3252 | if (prepare_vmcs02(vcpu, vmcs12, &exit_qual)) | |
3253 | goto vmentry_fail_vmexit_guest_mode; | |
3254 | ||
3255 | if (from_vmentry) { | |
3256 | exit_reason = EXIT_REASON_MSR_LOAD_FAIL; | |
3257 | exit_qual = nested_vmx_load_msr(vcpu, | |
3258 | vmcs12->vm_entry_msr_load_addr, | |
3259 | vmcs12->vm_entry_msr_load_count); | |
3260 | if (exit_qual) | |
3261 | goto vmentry_fail_vmexit_guest_mode; | |
3262 | } else { | |
3263 | /* | |
3264 | * The MMU is not initialized to point at the right entities yet and | |
3265 | * "get pages" would need to read data from the guest (i.e. we will | |
3266 | * need to perform gpa to hpa translation). Request a call | |
3267 | * to nested_get_vmcs12_pages before the next VM-entry. The MSRs | |
3268 | * have already been set at vmentry time and should not be reset. | |
3269 | */ | |
3270 | kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); | |
3271 | } | |
3272 | ||
3273 | /* | |
3274 | * If L1 had a pending IRQ/NMI until it executed | |
3275 | * VMLAUNCH/VMRESUME which wasn't delivered because it was | |
3276 | * disallowed (e.g. interrupts disabled), L0 needs to | |
3277 | * evaluate if this pending event should cause an exit from L2 | |
3279 | * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't | |
3279 | * intercept EXTERNAL_INTERRUPT). | |
3280 | * | |
3281 | * Usually this would be handled by the processor noticing an | |
3282 | * IRQ/NMI window request, or checking RVI during evaluation of | |
3283 | * pending virtual interrupts. However, this setting was done | |
3284 | * on VMCS01 and now VMCS02 is active instead. Thus, we force L0 | |
3285 | * to perform pending event evaluation by requesting a KVM_REQ_EVENT. | |
3286 | */ | |
3287 | if (unlikely(evaluate_pending_interrupts)) | |
3288 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
3289 | ||
359a6c3d PB |
3290 | /* |
3291 | * Do not start the preemption timer hrtimer until after we know | |
3292 | * we are successful, so that only nested_vmx_vmexit needs to cancel | |
3293 | * the timer. | |
3294 | */ | |
3295 | vmx->nested.preemption_timer_expired = false; | |
3296 | if (nested_cpu_has_preemption_timer(vmcs12)) | |
3297 | vmx_start_preemption_timer(vcpu); | |
3298 | ||
55d2375e SC |
3299 | /* |
3300 | * Note no nested_vmx_succeed or nested_vmx_fail here. At this point | |
3301 | * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet | |
3302 | * returned as far as L1 is concerned. It will only return (and set | |
3303 | * the success flag) when L2 exits (see nested_vmx_vmexit()). | |
3304 | */ | |
671ddc70 | 3305 | return NVMX_VMENTRY_SUCCESS; |
55d2375e SC |
3306 | |
3307 | /* | |
3308 | * A failed consistency check that leads to a VMExit during L1's | |
3309 | * VMEnter to L2 is a variation of a normal VMexit, as explained in | |
3310 | * 26.7 "VM-entry failures during or after loading guest state". | |
3311 | */ | |
3312 | vmentry_fail_vmexit_guest_mode: | |
5e3d394f | 3313 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) |
55d2375e SC |
3314 | vcpu->arch.tsc_offset -= vmcs12->tsc_offset; |
3315 | leave_guest_mode(vcpu); | |
3316 | ||
3317 | vmentry_fail_vmexit: | |
3318 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | |
3319 | ||
3320 | if (!from_vmentry) | |
671ddc70 | 3321 | return NVMX_VMENTRY_VMEXIT; |
55d2375e SC |
3322 | |
3323 | load_vmcs12_host_state(vcpu, vmcs12); | |
3324 | vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY; | |
3325 | vmcs12->exit_qualification = exit_qual; | |
3326 | if (enable_shadow_vmcs || vmx->nested.hv_evmcs) | |
3731905e | 3327 | vmx->nested.need_vmcs12_to_shadow_sync = true; |
671ddc70 | 3328 | return NVMX_VMENTRY_VMEXIT; |
55d2375e SC |
3329 | } |
3330 | ||
3331 | /* | |
3332 | * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1 | |
3333 | * for running an L2 nested guest. | |
3334 | */ | |
3335 | static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |
3336 | { | |
3337 | struct vmcs12 *vmcs12; | |
671ddc70 | 3338 | enum nvmx_vmentry_status status; |
55d2375e SC |
3339 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3340 | u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu); | |
b6a0653a | 3341 | enum nested_evmptrld_status evmptrld_status; |
55d2375e SC |
3342 | |
3343 | if (!nested_vmx_check_permission(vcpu)) | |
3344 | return 1; | |
3345 | ||
b6a0653a VK |
3346 | evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch); |
3347 | if (evmptrld_status == EVMPTRLD_ERROR) { | |
3348 | kvm_queue_exception(vcpu, UD_VECTOR); | |
55d2375e | 3349 | return 1; |
b6a0653a VK |
3350 | } else if (evmptrld_status == EVMPTRLD_VMFAIL) { |
3351 | return nested_vmx_failInvalid(vcpu); | |
3352 | } | |
55d2375e SC |
3353 | |
3354 | if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull) | |
3355 | return nested_vmx_failInvalid(vcpu); | |
3356 | ||
3357 | vmcs12 = get_vmcs12(vcpu); | |
3358 | ||
3359 | /* | |
3360 | * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact | |
3361 | * that there *is* a valid VMCS pointer, RFLAGS.CF is set | |
3362 | * rather than RFLAGS.ZF, and no error number is stored to the | |
3363 | * VM-instruction error field. | |
3364 | */ | |
3365 | if (vmcs12->hdr.shadow_vmcs) | |
3366 | return nested_vmx_failInvalid(vcpu); | |
3367 | ||
3368 | if (vmx->nested.hv_evmcs) { | |
3369 | copy_enlightened_to_vmcs12(vmx); | |
3370 | /* Enlightened VMCS doesn't have launch state */ | |
3371 | vmcs12->launch_state = !launch; | |
3372 | } else if (enable_shadow_vmcs) { | |
3373 | copy_shadow_to_vmcs12(vmx); | |
3374 | } | |
3375 | ||
3376 | /* | |
3377 | * The nested entry process starts with enforcing various prerequisites | |
3378 | * on vmcs12 as required by the Intel SDM, and acting appropriately when | |
3379 | * they fail: As the SDM explains, some conditions should cause the | |
3380 | * instruction to fail, while others will cause the instruction to seem | |
3381 | * to succeed, but return an EXIT_REASON_INVALID_STATE. | |
3382 | * To speed up the normal (success) code path, we should avoid checking | |
3383 | * for misconfigurations which will anyway be caught by the processor | |
3384 | * when using the merged vmcs02. | |
3385 | */ | |
3386 | if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) | |
3387 | return nested_vmx_failValid(vcpu, | |
3388 | VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS); | |
3389 | ||
3390 | if (vmcs12->launch_state == launch) | |
3391 | return nested_vmx_failValid(vcpu, | |
3392 | launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS | |
3393 | : VMXERR_VMRESUME_NONLAUNCHED_VMCS); | |
3394 | ||
98d9e858 PB |
3395 | if (nested_vmx_check_controls(vcpu, vmcs12)) |
3396 | return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | |
5478ba34 | 3397 | |
98d9e858 PB |
3398 | if (nested_vmx_check_host_state(vcpu, vmcs12)) |
3399 | return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD); | |
55d2375e SC |
3400 | |
3401 | /* | |
3402 | * We're finally done with prerequisite checking, and can start with | |
3403 | * the nested entry. | |
3404 | */ | |
3405 | vmx->nested.nested_run_pending = 1; | |
671ddc70 JM |
3406 | status = nested_vmx_enter_non_root_mode(vcpu, true); |
3407 | if (unlikely(status != NVMX_VMENTRY_SUCCESS)) | |
3408 | goto vmentry_failed; | |
55d2375e SC |
3409 | |
3410 | /* Hide L1D cache contents from the nested guest. */ | |
3411 | vmx->vcpu.arch.l1tf_flush_l1d = true; | |
3412 | ||
3413 | /* | |
3414 | * Must happen outside of nested_vmx_enter_non_root_mode() as it will | |
3415 | * also be used as part of restoring nVMX state for | |
3416 | * snapshot restore (migration). | |
3417 | * | |
3418 | * In this flow, it is assumed that vmcs12 cache was | |
3419 | * transferred as part of the captured nVMX state and should | |
3420 | * therefore not be read from guest memory (which may not | |
3421 | * exist on destination host yet). | |
3422 | */ | |
3423 | nested_cache_shadow_vmcs12(vcpu, vmcs12); | |
3424 | ||
3425 | /* | |
9ebdfe52 JM |
3426 | * If we're entering a halted L2 vcpu and the L2 vcpu won't be |
3427 | * awakened by event injection or by an NMI-window VM-exit or | |
3428 | * by an interrupt-window VM-exit, halt the vcpu. | |
55d2375e SC |
3429 | */ |
3430 | if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && | |
9ebdfe52 | 3431 | !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) && |
4e2a0bc5 | 3432 | !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) && |
9dadc2f9 | 3433 | !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) && |
9ebdfe52 | 3434 | (vmcs12->guest_rflags & X86_EFLAGS_IF))) { |
55d2375e SC |
3435 | vmx->nested.nested_run_pending = 0; |
3436 | return kvm_vcpu_halt(vcpu); | |
3437 | } | |
3438 | return 1; | |
671ddc70 JM |
3439 | |
3440 | vmentry_failed: | |
3441 | vmx->nested.nested_run_pending = 0; | |
3442 | if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR) | |
3443 | return 0; | |
3444 | if (status == NVMX_VMENTRY_VMEXIT) | |
3445 | return 1; | |
3446 | WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL); | |
3447 | return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | |
55d2375e SC |
3448 | } |
3449 | ||
3450 | /* | |
3451 | * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date | |
67b0ae43 | 3452 | * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK). |
55d2375e SC |
3453 | * This function returns the new value we should put in vmcs12.guest_cr0. |
3454 | * It's not enough to just return the vmcs02 GUEST_CR0. Rather, | |
3455 | * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now | |
3456 | * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0 | |
3457 | * didn't trap the bit, because if L1 did, so would L0). | |
3458 | * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have | |
3459 | * been modified by L2, and L1 knows it. So just leave the old value of | |
3460 | * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0 | |
3461 | * isn't relevant, because if L0 traps this bit it can set it to anything. | |
3462 | * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have | |
3463 | * changed these bits, and therefore they need to be updated, but L0 | |
3464 | * didn't necessarily allow them to be changed in GUEST_CR0 - and rather | |
3465 | * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there. | |
3466 | */ | |
3467 | static inline unsigned long | |
3468 | vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |
3469 | { | |
3470 | return | |
3471 | /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) | | |
3472 | /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) | | |
3473 | /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask | | |
3474 | vcpu->arch.cr0_guest_owned_bits)); | |
3475 | } | |
3476 | ||
3477 | static inline unsigned long | |
3478 | vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |
3479 | { | |
3480 | return | |
3481 | /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) | | |
3482 | /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) | | |
3483 | /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask | | |
3484 | vcpu->arch.cr4_guest_owned_bits)); | |
3485 | } | |
3486 | ||
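To make the three-way merge in vmcs12_guest_cr0()/vmcs12_guest_cr4() concrete, here is a hedged standalone sketch of the same bit arithmetic (illustrative parameter names only, not KVM code): bits L0 lets the guest own come from the hardware vmcs02 GUEST_CR0, bits L1 traps come from the stale vmcs12 value, and the remaining bits (trapped by L0 but not by L1) come from vmcs02's CR0 read shadow.

    /* Illustrative reconstruction of vmcs12.guest_cr0 from three sources. */
    #include <stdint.h>

    static uint64_t merge_guest_cr0(uint64_t vmcs02_guest_cr0,   /* hardware value    */
                                    uint64_t vmcs02_read_shadow, /* L0's read shadow  */
                                    uint64_t vmcs12_guest_cr0,   /* L1's stale copy   */
                                    uint64_t l0_guest_owned,     /* bits L0 didn't trap */
                                    uint64_t l1_guest_host_mask) /* bits L1 traps     */
    {
            return (vmcs02_guest_cr0  & l0_guest_owned) |                      /* case 1 */
                   (vmcs12_guest_cr0  & l1_guest_host_mask) |                  /* case 2 */
                   (vmcs02_read_shadow & ~(l1_guest_host_mask | l0_guest_owned)); /* case 3 */
    }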
3487 | static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu, | |
3488 | struct vmcs12 *vmcs12) | |
3489 | { | |
3490 | u32 idt_vectoring; | |
3491 | unsigned int nr; | |
3492 | ||
3493 | if (vcpu->arch.exception.injected) { | |
3494 | nr = vcpu->arch.exception.nr; | |
3495 | idt_vectoring = nr | VECTORING_INFO_VALID_MASK; | |
3496 | ||
3497 | if (kvm_exception_is_soft(nr)) { | |
3498 | vmcs12->vm_exit_instruction_len = | |
3499 | vcpu->arch.event_exit_inst_len; | |
3500 | idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION; | |
3501 | } else | |
3502 | idt_vectoring |= INTR_TYPE_HARD_EXCEPTION; | |
3503 | ||
3504 | if (vcpu->arch.exception.has_error_code) { | |
3505 | idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK; | |
3506 | vmcs12->idt_vectoring_error_code = | |
3507 | vcpu->arch.exception.error_code; | |
3508 | } | |
3509 | ||
3510 | vmcs12->idt_vectoring_info_field = idt_vectoring; | |
3511 | } else if (vcpu->arch.nmi_injected) { | |
3512 | vmcs12->idt_vectoring_info_field = | |
3513 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR; | |
3514 | } else if (vcpu->arch.interrupt.injected) { | |
3515 | nr = vcpu->arch.interrupt.nr; | |
3516 | idt_vectoring = nr | VECTORING_INFO_VALID_MASK; | |
3517 | ||
3518 | if (vcpu->arch.interrupt.soft) { | |
3519 | idt_vectoring |= INTR_TYPE_SOFT_INTR; | |
3520 | vmcs12->vm_entry_instruction_len = | |
3521 | vcpu->arch.event_exit_inst_len; | |
3522 | } else | |
3523 | idt_vectoring |= INTR_TYPE_EXT_INTR; | |
3524 | ||
3525 | vmcs12->idt_vectoring_info_field = idt_vectoring; | |
3526 | } | |
3527 | } | |
3528 | ||
3529 | ||
96b100cd | 3530 | void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu) |
55d2375e SC |
3531 | { |
3532 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
3533 | gfn_t gfn; | |
3534 | ||
3535 | /* | |
3536 | * Don't need to mark the APIC access page dirty; it is never | |
3537 | * written to by the CPU during APIC virtualization. | |
3538 | */ | |
3539 | ||
3540 | if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) { | |
3541 | gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT; | |
3542 | kvm_vcpu_mark_page_dirty(vcpu, gfn); | |
3543 | } | |
3544 | ||
3545 | if (nested_cpu_has_posted_intr(vmcs12)) { | |
3546 | gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT; | |
3547 | kvm_vcpu_mark_page_dirty(vcpu, gfn); | |
3548 | } | |
3549 | } | |
3550 | ||
3551 | static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) | |
3552 | { | |
3553 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3554 | int max_irr; | |
3555 | void *vapic_page; | |
3556 | u16 status; | |
3557 | ||
3558 | if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) | |
3559 | return; | |
3560 | ||
3561 | vmx->nested.pi_pending = false; | |
3562 | if (!pi_test_and_clear_on(vmx->nested.pi_desc)) | |
3563 | return; | |
3564 | ||
3565 | max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); | |
3566 | if (max_irr != 256) { | |
96c66e87 KA |
3567 | vapic_page = vmx->nested.virtual_apic_map.hva; |
3568 | if (!vapic_page) | |
3569 | return; | |
3570 | ||
55d2375e SC |
3571 | __kvm_apic_update_irr(vmx->nested.pi_desc->pir, |
3572 | vapic_page, &max_irr); | |
55d2375e SC |
3573 | status = vmcs_read16(GUEST_INTR_STATUS); |
3574 | if ((u8)max_irr > ((u8)status & 0xff)) { | |
3575 | status &= ~0xff; | |
3576 | status |= (u8)max_irr; | |
3577 | vmcs_write16(GUEST_INTR_STATUS, status); | |
3578 | } | |
3579 | } | |
3580 | ||
3581 | nested_mark_vmcs12_pages_dirty(vcpu); | |
3582 | } | |
3583 | ||
3584 | static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu, | |
3585 | unsigned long exit_qual) | |
3586 | { | |
3587 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
3588 | unsigned int nr = vcpu->arch.exception.nr; | |
3589 | u32 intr_info = nr | INTR_INFO_VALID_MASK; | |
3590 | ||
3591 | if (vcpu->arch.exception.has_error_code) { | |
3592 | vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code; | |
3593 | intr_info |= INTR_INFO_DELIVER_CODE_MASK; | |
3594 | } | |
3595 | ||
3596 | if (kvm_exception_is_soft(nr)) | |
3597 | intr_info |= INTR_TYPE_SOFT_EXCEPTION; | |
3598 | else | |
3599 | intr_info |= INTR_TYPE_HARD_EXCEPTION; | |
3600 | ||
3601 | if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) && | |
3602 | vmx_get_nmi_mask(vcpu)) | |
3603 | intr_info |= INTR_INFO_UNBLOCK_NMI; | |
3604 | ||
3605 | nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual); | |
3606 | } | |
3607 | ||
684c0422 OU |
3608 | /* |
3609 | * Returns true if a debug trap is pending delivery. | |
3610 | * | |
3611 | * In KVM, debug traps bear an exception payload. As such, the class of a #DB | |
3612 | * exception may be inferred from the presence of an exception payload. | |
3613 | */ | |
3614 | static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu) | |
3615 | { | |
3616 | return vcpu->arch.exception.pending && | |
3617 | vcpu->arch.exception.nr == DB_VECTOR && | |
3618 | vcpu->arch.exception.payload; | |
3619 | } | |
3620 | ||
3621 | /* | |
3622 | * Certain VM-exits set the 'pending debug exceptions' field to indicate a | |
3623 | * recognized #DB (data or single-step) that has yet to be delivered. Since KVM | |
3624 | * represents these debug traps with a payload that is said to be compatible | |
3625 | * with the 'pending debug exceptions' field, write the payload to the VMCS | |
3626 | * field if a VM-exit is delivered before the debug trap. | |
3627 | */ | |
3628 | static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu) | |
3629 | { | |
3630 | if (vmx_pending_dbg_trap(vcpu)) | |
3631 | vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, | |
3632 | vcpu->arch.exception.payload); | |
3633 | } | |
3634 | ||
a1c77abb | 3635 | static int vmx_check_nested_events(struct kvm_vcpu *vcpu) |
55d2375e SC |
3636 | { |
3637 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3638 | unsigned long exit_qual; | |
3639 | bool block_nested_events = | |
3640 | vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); | |
5ef8acbd | 3641 | bool mtf_pending = vmx->nested.mtf_pending; |
4b9852f4 LA |
3642 | struct kvm_lapic *apic = vcpu->arch.apic; |
3643 | ||
5ef8acbd OU |
3644 | /* |
3645 | * Clear the MTF state. If a higher priority VM-exit is delivered first, | |
3646 | * this state is discarded. | |
3647 | */ | |
5c8beb47 OU |
3648 | if (!block_nested_events) |
3649 | vmx->nested.mtf_pending = false; | |
5ef8acbd | 3650 | |
4b9852f4 LA |
3651 | if (lapic_in_kernel(vcpu) && |
3652 | test_bit(KVM_APIC_INIT, &apic->pending_events)) { | |
3653 | if (block_nested_events) | |
3654 | return -EBUSY; | |
684c0422 | 3655 | nested_vmx_update_pending_dbg(vcpu); |
e64a8508 | 3656 | clear_bit(KVM_APIC_INIT, &apic->pending_events); |
4b9852f4 LA |
3657 | nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); |
3658 | return 0; | |
3659 | } | |
55d2375e | 3660 | |
5ef8acbd OU |
3661 | /* |
3662 | * Process any exceptions that are not debug traps before MTF. | |
3663 | */ | |
3664 | if (vcpu->arch.exception.pending && | |
3665 | !vmx_pending_dbg_trap(vcpu) && | |
3666 | nested_vmx_check_exception(vcpu, &exit_qual)) { | |
3667 | if (block_nested_events) | |
3668 | return -EBUSY; | |
3669 | nested_vmx_inject_exception_vmexit(vcpu, exit_qual); | |
3670 | return 0; | |
3671 | } | |
3672 | ||
3673 | if (mtf_pending) { | |
3674 | if (block_nested_events) | |
3675 | return -EBUSY; | |
3676 | nested_vmx_update_pending_dbg(vcpu); | |
3677 | nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); | |
3678 | return 0; | |
3679 | } | |
3680 | ||
55d2375e | 3681 | if (vcpu->arch.exception.pending && |
5ef8acbd | 3682 | nested_vmx_check_exception(vcpu, &exit_qual)) { |
55d2375e SC |
3683 | if (block_nested_events) |
3684 | return -EBUSY; | |
3685 | nested_vmx_inject_exception_vmexit(vcpu, exit_qual); | |
3686 | return 0; | |
3687 | } | |
3688 | ||
3689 | if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) && | |
3690 | vmx->nested.preemption_timer_expired) { | |
3691 | if (block_nested_events) | |
3692 | return -EBUSY; | |
3693 | nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); | |
3694 | return 0; | |
3695 | } | |
3696 | ||
3697 | if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) { | |
3698 | if (block_nested_events) | |
3699 | return -EBUSY; | |
3700 | nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, | |
3701 | NMI_VECTOR | INTR_TYPE_NMI_INTR | | |
3702 | INTR_INFO_VALID_MASK, 0); | |
3703 | /* | |
3704 | * The NMI-triggered VM exit counts as injection: | |
3705 | * clear this one and block further NMIs. | |
3706 | */ | |
3707 | vcpu->arch.nmi_pending = 0; | |
3708 | vmx_set_nmi_mask(vcpu, true); | |
3709 | return 0; | |
3710 | } | |
3711 | ||
a1c77abb | 3712 | if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) { |
55d2375e SC |
3713 | if (block_nested_events) |
3714 | return -EBUSY; | |
3715 | nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); | |
3716 | return 0; | |
3717 | } | |
3718 | ||
3719 | vmx_complete_nested_posted_interrupt(vcpu); | |
3720 | return 0; | |
3721 | } | |
3722 | ||
3723 | static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu) | |
3724 | { | |
3725 | ktime_t remaining = | |
3726 | hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); | |
3727 | u64 value; | |
3728 | ||
3729 | if (ktime_to_ns(remaining) <= 0) | |
3730 | return 0; | |
3731 | ||
3732 | value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz; | |
3733 | do_div(value, 1000000); | |
3734 | return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE; | |
3735 | } | |
3736 | ||
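The conversion in vmx_get_preemption_timer_value() goes from wall-clock nanoseconds to guest TSC ticks (ns x virtual_tsc_khz / 10^6) and then to VMX-preemption-timer ticks by shifting right by the emulated rate (5, i.e. one timer tick per 32 TSC cycles). A worked example with made-up numbers, written as a standalone sketch rather than kernel code:

    /* Illustrative version of the ns -> preemption-timer-tick conversion. */
    #include <stdint.h>

    #define EMULATED_PREEMPTION_TIMER_RATE 5  /* one timer tick per 2^5 TSC cycles */

    static uint64_t ns_to_preemption_timer_ticks(uint64_t ns,
                                                 uint64_t virtual_tsc_khz)
    {
            uint64_t tsc_ticks = ns * virtual_tsc_khz / 1000000;

            return tsc_ticks >> EMULATED_PREEMPTION_TIMER_RATE;
    }

    /* Example: 1,600,000 ns remaining at virtual_tsc_khz = 2,000,000 (2 GHz)
     *   -> 3,200,000 TSC ticks -> 100,000 preemption-timer ticks,
     * which is the value L1 reads back in vmx_preemption_timer_value. */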
7952d769 | 3737 | static bool is_vmcs12_ext_field(unsigned long field) |
55d2375e | 3738 | { |
7952d769 SC |
3739 | switch (field) { |
3740 | case GUEST_ES_SELECTOR: | |
3741 | case GUEST_CS_SELECTOR: | |
3742 | case GUEST_SS_SELECTOR: | |
3743 | case GUEST_DS_SELECTOR: | |
3744 | case GUEST_FS_SELECTOR: | |
3745 | case GUEST_GS_SELECTOR: | |
3746 | case GUEST_LDTR_SELECTOR: | |
3747 | case GUEST_TR_SELECTOR: | |
3748 | case GUEST_ES_LIMIT: | |
3749 | case GUEST_CS_LIMIT: | |
3750 | case GUEST_SS_LIMIT: | |
3751 | case GUEST_DS_LIMIT: | |
3752 | case GUEST_FS_LIMIT: | |
3753 | case GUEST_GS_LIMIT: | |
3754 | case GUEST_LDTR_LIMIT: | |
3755 | case GUEST_TR_LIMIT: | |
3756 | case GUEST_GDTR_LIMIT: | |
3757 | case GUEST_IDTR_LIMIT: | |
3758 | case GUEST_ES_AR_BYTES: | |
3759 | case GUEST_DS_AR_BYTES: | |
3760 | case GUEST_FS_AR_BYTES: | |
3761 | case GUEST_GS_AR_BYTES: | |
3762 | case GUEST_LDTR_AR_BYTES: | |
3763 | case GUEST_TR_AR_BYTES: | |
3764 | case GUEST_ES_BASE: | |
3765 | case GUEST_CS_BASE: | |
3766 | case GUEST_SS_BASE: | |
3767 | case GUEST_DS_BASE: | |
3768 | case GUEST_FS_BASE: | |
3769 | case GUEST_GS_BASE: | |
3770 | case GUEST_LDTR_BASE: | |
3771 | case GUEST_TR_BASE: | |
3772 | case GUEST_GDTR_BASE: | |
3773 | case GUEST_IDTR_BASE: | |
3774 | case GUEST_PENDING_DBG_EXCEPTIONS: | |
3775 | case GUEST_BNDCFGS: | |
3776 | return true; | |
3777 | default: | |
3778 | break; | |
3779 | } | |
55d2375e | 3780 | |
7952d769 SC |
3781 | return false; |
3782 | } | |
3783 | ||
3784 | static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, | |
3785 | struct vmcs12 *vmcs12) | |
3786 | { | |
3787 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
55d2375e SC |
3788 | |
3789 | vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR); | |
3790 | vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR); | |
3791 | vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR); | |
3792 | vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR); | |
3793 | vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR); | |
3794 | vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR); | |
3795 | vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR); | |
3796 | vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR); | |
3797 | vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT); | |
3798 | vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT); | |
3799 | vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT); | |
3800 | vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT); | |
3801 | vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT); | |
3802 | vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT); | |
3803 | vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT); | |
3804 | vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT); | |
3805 | vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT); | |
3806 | vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT); | |
3807 | vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES); | |
55d2375e SC |
3808 | vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES); |
3809 | vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES); | |
3810 | vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES); | |
3811 | vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES); | |
3812 | vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES); | |
3813 | vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE); | |
3814 | vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE); | |
3815 | vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE); | |
3816 | vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE); | |
3817 | vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE); | |
3818 | vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE); | |
3819 | vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE); | |
3820 | vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE); | |
3821 | vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE); | |
3822 | vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE); | |
7952d769 SC |
3823 | vmcs12->guest_pending_dbg_exceptions = |
3824 | vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); | |
3825 | if (kvm_mpx_supported()) | |
3826 | vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); | |
3827 | ||
3828 | vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; | |
3829 | } | |
3830 | ||
3831 | static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu, | |
3832 | struct vmcs12 *vmcs12) | |
3833 | { | |
3834 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3835 | int cpu; | |
3836 | ||
3837 | if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) | |
3838 | return; | |
3839 | ||
3840 | ||
3841 | WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); | |
3842 | ||
3843 | cpu = get_cpu(); | |
3844 | vmx->loaded_vmcs = &vmx->nested.vmcs02; | |
3845 | vmx_vcpu_load(&vmx->vcpu, cpu); | |
3846 | ||
3847 | sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); | |
3848 | ||
3849 | vmx->loaded_vmcs = &vmx->vmcs01; | |
3850 | vmx_vcpu_load(&vmx->vcpu, cpu); | |
3851 | put_cpu(); | |
3852 | } | |
3853 | ||
3854 | /* | |
3855 | * Update the guest state fields of vmcs12 to reflect changes that | |
3856 | * occurred while L2 was running. (The "IA-32e mode guest" bit of the | |
3857 | * VM-entry controls is also updated, since this is really a guest | |
3858 | * state bit.) | |
3859 | */ | |
3860 | static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |
3861 | { | |
3862 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
3863 | ||
3864 | if (vmx->nested.hv_evmcs) | |
3865 | sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); | |
3866 | ||
3867 | vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; | |
3868 | ||
3869 | vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); | |
3870 | vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); | |
3871 | ||
3872 | vmcs12->guest_rsp = kvm_rsp_read(vcpu); | |
3873 | vmcs12->guest_rip = kvm_rip_read(vcpu); | |
3874 | vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); | |
3875 | ||
3876 | vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES); | |
3877 | vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES); | |
55d2375e | 3878 | |
de70d279 SC |
3879 | vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); |
3880 | vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); | |
3881 | vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); | |
55d2375e SC |
3882 | |
3883 | vmcs12->guest_interruptibility_info = | |
3884 | vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | |
7952d769 | 3885 | |
55d2375e SC |
3886 | if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) |
3887 | vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT; | |
3888 | else | |
3889 | vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE; | |
3890 | ||
b4b65b56 PB |
3891 | if (nested_cpu_has_preemption_timer(vmcs12) && |
3892 | vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER) | |
55d2375e SC |
3893 | vmcs12->vmx_preemption_timer_value = |
3894 | vmx_get_preemption_timer_value(vcpu); | |
55d2375e SC |
3895 | |
3896 | /* | |
3897 | * In some cases (usually, nested EPT), L2 is allowed to change its | |
3898 | * own CR3 without exiting. If it has changed it, we must keep it. | |
3899 | * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined | |
3900 | * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12. | |
3901 | * | |
3902 | * Additionally, restore L2's PDPTR to vmcs12. | |
3903 | */ | |
3904 | if (enable_ept) { | |
3905 | vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3); | |
c7554efc SC |
3906 | if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) { |
3907 | vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0); | |
3908 | vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1); | |
3909 | vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2); | |
3910 | vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); | |
3911 | } | |
55d2375e SC |
3912 | } |
3913 | ||
3914 | vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); | |
3915 | ||
3916 | if (nested_cpu_has_vid(vmcs12)) | |
3917 | vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); | |
3918 | ||
3919 | vmcs12->vm_entry_controls = | |
3920 | (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | | |
3921 | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); | |
3922 | ||
699a1ac2 | 3923 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) |
55d2375e | 3924 | kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); |
55d2375e | 3925 | |
55d2375e SC |
3926 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) |
3927 | vmcs12->guest_ia32_efer = vcpu->arch.efer; | |
55d2375e SC |
3928 | } |
3929 | ||
3930 | /* | |
3931 | * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits | |
3932 | * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12), | |
3933 | * and this function updates it to reflect the changes to the guest state while | |
3934 | * L2 was running (and perhaps made some exits which were handled directly by L0 | |
3935 | * without going back to L1), and to reflect the exit reason. | |
3936 | * Note that we do not have to copy here all VMCS fields, just those that | |
3937 | * could have changed by the L2 guest or the exit - i.e., the guest-state and | |
3938 | * exit-information fields only. Other fields are modified by L1 with VMWRITE, | |
3939 | * which already writes to vmcs12 directly. | |
3940 | */ | |
3941 | static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |
3942 | u32 exit_reason, u32 exit_intr_info, | |
3943 | unsigned long exit_qualification) | |
3944 | { | |
55d2375e | 3945 | /* update exit information fields: */ |
55d2375e SC |
3946 | vmcs12->vm_exit_reason = exit_reason; |
3947 | vmcs12->exit_qualification = exit_qualification; | |
3948 | vmcs12->vm_exit_intr_info = exit_intr_info; | |
3949 | ||
3950 | vmcs12->idt_vectoring_info_field = 0; | |
3951 | vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); | |
3952 | vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); | |
3953 | ||
3954 | if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) { | |
3955 | vmcs12->launch_state = 1; | |
3956 | ||
3957 | /* vm_entry_intr_info_field is cleared on exit. Emulate this | |
3958 | * instead of reading the real value. */ | |
3959 | vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK; | |
3960 | ||
3961 | /* | |
3962 | * Transfer the event that L0 or L1 may have wanted to inject into | |
3963 | * L2 to IDT_VECTORING_INFO_FIELD. | |
3964 | */ | |
3965 | vmcs12_save_pending_event(vcpu, vmcs12); | |
a0d4f803 KS |
3966 | |
3967 | /* | |
3968 | * According to spec, there's no need to store the guest's | |
3969 | * MSRs if the exit is due to a VM-entry failure that occurs | |
3970 | * during or after loading the guest state. Since this exit | |
3971 | * does not fall in that category, we need to save the MSRs. | |
3972 | */ | |
3973 | if (nested_vmx_store_msr(vcpu, | |
3974 | vmcs12->vm_exit_msr_store_addr, | |
3975 | vmcs12->vm_exit_msr_store_count)) | |
3976 | nested_vmx_abort(vcpu, | |
3977 | VMX_ABORT_SAVE_GUEST_MSR_FAIL); | |
55d2375e SC |
3978 | } |
3979 | ||
3980 | /* | |
3981 | * Drop what we picked up for L2 via vmx_complete_interrupts. It is | |
3982 | * preserved above and would only end up incorrectly in L1. | |
3983 | */ | |
3984 | vcpu->arch.nmi_injected = false; | |
3985 | kvm_clear_exception_queue(vcpu); | |
3986 | kvm_clear_interrupt_queue(vcpu); | |
3987 | } | |
3988 | ||
3989 | /* | |
3990 | * A part of what we need to do when the nested L2 guest exits and we want to | |
3991 | * run its L1 parent, is to reset L1's guest state to the host state specified | |
3992 | * in vmcs12. | |
3993 | * This function is to be called not only on normal nested exit, but also on | |
3994 | * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry | |
3995 | * Failures During or After Loading Guest State"). | |
3996 | * This function should be called when the active VMCS is L1's (vmcs01). | |
3997 | */ | |
3998 | static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |
3999 | struct vmcs12 *vmcs12) | |
4000 | { | |
4001 | struct kvm_segment seg; | |
4002 | u32 entry_failure_code; | |
4003 | ||
4004 | if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) | |
4005 | vcpu->arch.efer = vmcs12->host_ia32_efer; | |
4006 | else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) | |
4007 | vcpu->arch.efer |= (EFER_LMA | EFER_LME); | |
4008 | else | |
4009 | vcpu->arch.efer &= ~(EFER_LMA | EFER_LME); | |
4010 | vmx_set_efer(vcpu, vcpu->arch.efer); | |
4011 | ||
e9c16c78 PB |
4012 | kvm_rsp_write(vcpu, vmcs12->host_rsp); |
4013 | kvm_rip_write(vcpu, vmcs12->host_rip); | |
55d2375e SC |
4014 | vmx_set_rflags(vcpu, X86_EFLAGS_FIXED); |
4015 | vmx_set_interrupt_shadow(vcpu, 0); | |
4016 | ||
4017 | /* | |
4018 | * Note that calling vmx_set_cr0 is important, even if cr0 hasn't | |
4019 | * actually changed, because vmx_set_cr0 refers to efer set above. | |
4020 | * | |
4021 | * CR0_GUEST_HOST_MASK is already set in the original vmcs01 | |
4022 | * (KVM doesn't change it). | 
4023 | */ | |
4024 | vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; | |
4025 | vmx_set_cr0(vcpu, vmcs12->host_cr0); | |
4026 | ||
4027 | /* Same as above - no reason to call set_cr4_guest_host_mask(). */ | |
4028 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | |
4029 | vmx_set_cr4(vcpu, vmcs12->host_cr4); | |
4030 | ||
4031 | nested_ept_uninit_mmu_context(vcpu); | |
4032 | ||
4033 | /* | |
4034 | * Only PDPTE load can fail as the value of cr3 was checked on entry and | |
4035 | * couldn't have changed. | |
4036 | */ | |
4037 | if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code)) | |
4038 | nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL); | |
4039 | ||
4040 | if (!enable_ept) | |
4041 | vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; | |
4042 | ||
4043 | /* | |
4044 | * If vmcs01 doesn't use VPID, CPU flushes TLB on every | |
4045 | * VMEntry/VMExit. Thus, no need to flush TLB. | |
4046 | * | |
4047 | * If vmcs12 doesn't use VPID, L1 expects TLB to be | |
4048 | * flushed on every VMEntry/VMExit. | |
4049 | * | |
4050 | * Otherwise, we can preserve TLB entries as long as we are | |
4051 | * able to tag L1 TLB entries differently than L2 TLB entries. | |
4052 | * | |
4053 | * If vmcs12 uses EPT, we need to execute this flush on EPTP01 | |
4054 | * and therefore we request the TLB flush to happen only after VMCS EPTP | |
727a7e27 | 4055 | * has been set by KVM_REQ_LOAD_MMU_PGD. |
55d2375e SC |
4056 | */ |
4057 | if (enable_vpid && | |
4058 | (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) { | |
4059 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); | |
4060 | } | |
4061 | ||
4062 | vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs); | |
4063 | vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp); | |
4064 | vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); | |
4065 | vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); | |
4066 | vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); | |
4067 | vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); | |
4068 | vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); | |
4069 | ||
4070 | /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */ | |
4071 | if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS) | |
4072 | vmcs_write64(GUEST_BNDCFGS, 0); | |
4073 | ||
4074 | if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) { | |
4075 | vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); | |
4076 | vcpu->arch.pat = vmcs12->host_ia32_pat; | |
4077 | } | |
4078 | if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) | |
d1968421 OU |
4079 | WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL, |
4080 | vmcs12->host_ia32_perf_global_ctrl)); | |
55d2375e SC |
4081 | |
4082 | /* Set L1 segment info according to Intel SDM | |
4083 | 27.5.2 Loading Host Segment and Descriptor-Table Registers */ | |
4084 | seg = (struct kvm_segment) { | |
4085 | .base = 0, | |
4086 | .limit = 0xFFFFFFFF, | |
4087 | .selector = vmcs12->host_cs_selector, | |
4088 | .type = 11, | |
4089 | .present = 1, | |
4090 | .s = 1, | |
4091 | .g = 1 | |
4092 | }; | |
4093 | if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) | |
4094 | seg.l = 1; | |
4095 | else | |
4096 | seg.db = 1; | |
4097 | vmx_set_segment(vcpu, &seg, VCPU_SREG_CS); | |
4098 | seg = (struct kvm_segment) { | |
4099 | .base = 0, | |
4100 | .limit = 0xFFFFFFFF, | |
4101 | .type = 3, | |
4102 | .present = 1, | |
4103 | .s = 1, | |
4104 | .db = 1, | |
4105 | .g = 1 | |
4106 | }; | |
4107 | seg.selector = vmcs12->host_ds_selector; | |
4108 | vmx_set_segment(vcpu, &seg, VCPU_SREG_DS); | |
4109 | seg.selector = vmcs12->host_es_selector; | |
4110 | vmx_set_segment(vcpu, &seg, VCPU_SREG_ES); | |
4111 | seg.selector = vmcs12->host_ss_selector; | |
4112 | vmx_set_segment(vcpu, &seg, VCPU_SREG_SS); | |
4113 | seg.selector = vmcs12->host_fs_selector; | |
4114 | seg.base = vmcs12->host_fs_base; | |
4115 | vmx_set_segment(vcpu, &seg, VCPU_SREG_FS); | |
4116 | seg.selector = vmcs12->host_gs_selector; | |
4117 | seg.base = vmcs12->host_gs_base; | |
4118 | vmx_set_segment(vcpu, &seg, VCPU_SREG_GS); | |
4119 | seg = (struct kvm_segment) { | |
4120 | .base = vmcs12->host_tr_base, | |
4121 | .limit = 0x67, | |
4122 | .selector = vmcs12->host_tr_selector, | |
4123 | .type = 11, | |
4124 | .present = 1 | |
4125 | }; | |
4126 | vmx_set_segment(vcpu, &seg, VCPU_SREG_TR); | |
4127 | ||
4128 | kvm_set_dr(vcpu, 7, 0x400); | |
4129 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); | |
4130 | ||
4131 | if (cpu_has_vmx_msr_bitmap()) | |
4132 | vmx_update_msr_bitmap(vcpu); | |
4133 | ||
4134 | if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, | |
4135 | vmcs12->vm_exit_msr_load_count)) | |
4136 | nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); | |
4137 | } | |
4138 | ||
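/*
 * Figure out what EFER value L1 (vmcs01's guest) was running with.  If
 * vmcs01 doesn't load EFER at VM-entry, L1 was running with the host's
 * EFER (when the dedicated entry control is supported); otherwise EFER
 * is found in the MSR autoload list or the shared MSR cache.
 */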
4139 | static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) | |
4140 | { | |
4141 | struct shared_msr_entry *efer_msr; | |
4142 | unsigned int i; | |
4143 | ||
4144 | if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) | |
4145 | return vmcs_read64(GUEST_IA32_EFER); | |
4146 | ||
4147 | if (cpu_has_load_ia32_efer()) | |
4148 | return host_efer; | |
4149 | ||
4150 | for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { | |
4151 | if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) | |
4152 | return vmx->msr_autoload.guest.val[i].value; | |
4153 | } | |
4154 | ||
4155 | efer_msr = find_msr_entry(vmx, MSR_EFER); | |
4156 | if (efer_msr) | |
4157 | return efer_msr->data; | |
4158 | ||
4159 | return host_efer; | |
4160 | } | |
4161 | ||
4162 | static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu) | |
4163 | { | |
4164 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
4165 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4166 | struct vmx_msr_entry g, h; | |
55d2375e SC |
4167 | gpa_t gpa; |
4168 | u32 i, j; | |
4169 | ||
4170 | vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT); | |
4171 | ||
4172 | if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { | |
4173 | /* | |
4174 | * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set | |
4175 | * as vmcs01.GUEST_DR7 contains a userspace defined value | |
4176 | * and vcpu->arch.dr7 is not squirreled away before the | |
4177 | * nested VMENTER (not worth adding a variable in nested_vmx). | |
4178 | */ | |
4179 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | |
4180 | kvm_set_dr(vcpu, 7, DR7_FIXED_1); | |
4181 | else | |
4182 | WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7))); | |
4183 | } | |
4184 | ||
4185 | /* | |
4186 | * Note that calling vmx_set_{efer,cr0,cr4} is important as they | |
4187 | * handle a variety of side effects to KVM's software model. | |
4188 | */ | |
4189 | vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); | |
4190 | ||
4191 | vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; | |
4192 | vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW)); | |
4193 | ||
4194 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | |
4195 | vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW)); | |
4196 | ||
4197 | nested_ept_uninit_mmu_context(vcpu); | |
f087a029 | 4198 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); |
cb3c1e2f | 4199 | kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); |
55d2375e SC |
4200 | |
4201 | /* | |
4202 | * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs | |
4203 | * from vmcs01 (if necessary). The PDPTRs are not loaded on | |
4204 | * VMFail; like everything else, we just need to ensure our | 
4205 | * software model is up-to-date. | |
4206 | */ | |
f087a029 SC |
4207 | if (enable_ept) |
4208 | ept_save_pdptrs(vcpu); | |
55d2375e SC |
4209 | |
4210 | kvm_mmu_reset_context(vcpu); | |
4211 | ||
4212 | if (cpu_has_vmx_msr_bitmap()) | |
4213 | vmx_update_msr_bitmap(vcpu); | |
4214 | ||
4215 | /* | |
4216 | * This nasty bit of open coding is a compromise between blindly | |
4217 | * loading L1's MSRs using the exit load lists (incorrect emulation | |
4218 | * of VMFail), leaving the nested VM's MSRs in the software model | |
4219 | * (incorrect behavior) and snapshotting the modified MSRs (too | |
4220 | * expensive since the lists are unbound by hardware). For each | |
4221 | * MSR that was (prematurely) loaded from the nested VMEntry load | |
4222 | * list, reload it from the exit load list if it exists and differs | |
4223 | * from the guest value. The intent is to stuff host state as | |
4224 | * silently as possible, not to fully process the exit load list. | |
4225 | */ | |
55d2375e SC |
4226 | for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { |
4227 | gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g)); | |
4228 | if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) { | |
4229 | pr_debug_ratelimited( | |
4230 | "%s read MSR index failed (%u, 0x%08llx)\n", | |
4231 | __func__, i, gpa); | |
4232 | goto vmabort; | |
4233 | } | |
4234 | ||
4235 | for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { | |
4236 | gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h)); | |
4237 | if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) { | |
4238 | pr_debug_ratelimited( | |
4239 | "%s read MSR failed (%u, 0x%08llx)\n", | |
4240 | __func__, j, gpa); | |
4241 | goto vmabort; | |
4242 | } | |
4243 | if (h.index != g.index) | |
4244 | continue; | |
4245 | if (h.value == g.value) | |
4246 | break; | |
4247 | ||
4248 | if (nested_vmx_load_msr_check(vcpu, &h)) { | |
4249 | pr_debug_ratelimited( | |
4250 | "%s check failed (%u, 0x%x, 0x%x)\n", | |
4251 | __func__, j, h.index, h.reserved); | |
4252 | goto vmabort; | |
4253 | } | |
4254 | ||
f20935d8 | 4255 | if (kvm_set_msr(vcpu, h.index, h.value)) { |
55d2375e SC |
4256 | pr_debug_ratelimited( |
4257 | "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", | |
4258 | __func__, j, h.index, h.value); | |
4259 | goto vmabort; | |
4260 | } | |
4261 | } | |
4262 | } | |
4263 | ||
4264 | return; | |
4265 | ||
4266 | vmabort: | |
4267 | nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); | |
4268 | } | |
4269 | ||
4270 | /* | |
4271 | * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1 | |
4272 | * and modify vmcs12 to make it see what it would expect to see there if | |
4273 | * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) | |
4274 | */ | |
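/*
 * An exit_reason of -1 means we are leaving guest mode without reflecting
 * a VM-exit into vmcs12, e.g. when KVM forces the vCPU out of nested
 * operation; the exit-information fields are then left untouched.
 */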
4275 | void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |
4276 | u32 exit_intr_info, unsigned long exit_qualification) | |
4277 | { | |
4278 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4279 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
4280 | ||
4281 | /* trying to cancel vmlaunch/vmresume is a bug */ | |
4282 | WARN_ON_ONCE(vmx->nested.nested_run_pending); | |
4283 | ||
4284 | leave_guest_mode(vcpu); | |
4285 | ||
b4b65b56 PB |
4286 | if (nested_cpu_has_preemption_timer(vmcs12)) |
4287 | hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); | |
4288 | ||
5e3d394f | 4289 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING) |
55d2375e SC |
4290 | vcpu->arch.tsc_offset -= vmcs12->tsc_offset; |
4291 | ||
4292 | if (likely(!vmx->fail)) { | |
3731905e | 4293 | sync_vmcs02_to_vmcs12(vcpu, vmcs12); |
f4f8316d SC |
4294 | |
4295 | if (exit_reason != -1) | |
55d2375e SC |
4296 | prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info, |
4297 | exit_qualification); | |
4298 | ||
4299 | /* | |
3731905e | 4300 | * Must happen outside of sync_vmcs02_to_vmcs12() as it will |
55d2375e SC |
4301 | * also be used to capture vmcs12 cache as part of |
4302 | * capturing nVMX state for snapshot (migration). | |
4303 | * | |
4304 | * Otherwise, this flush will dirty guest memory at a | |
4305 | * point it is already assumed by user-space to be | |
4306 | * immutable. | |
4307 | */ | |
4308 | nested_flush_cached_shadow_vmcs12(vcpu, vmcs12); | |
55d2375e SC |
4309 | } else { |
4310 | /* | |
4311 | * The only expected VM-instruction error is "VM entry with | |
4312 | * invalid control field(s)." Anything else indicates a | |
4313 | * problem with L0. And we should never get here with a | |
4314 | * VMFail of any type if early consistency checks are enabled. | |
4315 | */ | |
4316 | WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) != | |
4317 | VMXERR_ENTRY_INVALID_CONTROL_FIELD); | |
4318 | WARN_ON_ONCE(nested_early_check); | |
4319 | } | |
4320 | ||
4321 | vmx_switch_vmcs(vcpu, &vmx->vmcs01); | |
4322 | ||
4323 | /* Update any VMCS fields that might have changed while L2 ran */ | |
4324 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); | |
4325 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); | |
4326 | vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset); | |
02d496cf LA |
4327 | if (vmx->nested.l1_tpr_threshold != -1) |
4328 | vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); | |
55d2375e SC |
4329 | |
4330 | if (kvm_has_tsc_control) | |
4331 | decache_tsc_multiplier(vmx); | |
4332 | ||
4333 | if (vmx->nested.change_vmcs01_virtual_apic_mode) { | |
4334 | vmx->nested.change_vmcs01_virtual_apic_mode = false; | |
4335 | vmx_set_virtual_apic_mode(vcpu); | |
55d2375e SC |
4336 | } |
4337 | ||
55d2375e SC |
4338 | /* Unpin physical memory we referred to in vmcs02 */ |
4339 | if (vmx->nested.apic_access_page) { | |
b11494bc | 4340 | kvm_release_page_clean(vmx->nested.apic_access_page); |
55d2375e SC |
4341 | vmx->nested.apic_access_page = NULL; |
4342 | } | |
96c66e87 | 4343 | kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); |
3278e049 KA |
4344 | kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); |
4345 | vmx->nested.pi_desc = NULL; | |
55d2375e SC |
4346 | |
4347 | /* | |
4348 | * We are now running in L2, the mmu_notifier will force a reload of the | 
4349 | * page's hpa for the L2 vmcs. We need to reload it for L1 before entering L1. | 
4350 | */ | |
4351 | kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); | |
4352 | ||
4353 | if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs)) | |
3731905e | 4354 | vmx->nested.need_vmcs12_to_shadow_sync = true; |
55d2375e SC |
4355 | |
4356 | /* in case we halted in L2 */ | |
4357 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | |
4358 | ||
4359 | if (likely(!vmx->fail)) { | |
a1c77abb SC |
4360 | if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT && |
4361 | nested_exit_intr_ack_set(vcpu)) { | |
55d2375e SC |
4362 | int irq = kvm_cpu_get_interrupt(vcpu); |
4363 | WARN_ON(irq < 0); | |
4364 | vmcs12->vm_exit_intr_info = irq | | |
4365 | INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR; | |
4366 | } | |
4367 | ||
4368 | if (exit_reason != -1) | |
4369 | trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason, | |
4370 | vmcs12->exit_qualification, | |
4371 | vmcs12->idt_vectoring_info_field, | |
4372 | vmcs12->vm_exit_intr_info, | |
4373 | vmcs12->vm_exit_intr_error_code, | |
4374 | KVM_ISA_VMX); | |
4375 | ||
4376 | load_vmcs12_host_state(vcpu, vmcs12); | |
4377 | ||
4378 | return; | |
4379 | } | |
4380 | ||
4381 | /* | |
4382 | * After an early L2 VM-entry failure, we're now back | |
4383 | * in L1 which thinks it just finished a VMLAUNCH or | |
4384 | * VMRESUME instruction, so we need to set the failure | |
4385 | * flag and the VM-instruction error field of the VMCS | |
4386 | * accordingly, and skip the emulated instruction. | |
4387 | */ | |
4388 | (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | |
4389 | ||
4390 | /* | |
4391 | * Restore L1's host state to KVM's software model. We're here | |
4392 | * because a consistency check was caught by hardware, which | |
4393 | * means some amount of guest state has been propagated to KVM's | |
4394 | * model and needs to be unwound to the host's state. | |
4395 | */ | |
4396 | nested_vmx_restore_host_state(vcpu); | |
4397 | ||
4398 | vmx->fail = 0; | |
4399 | } | |
4400 | ||
4401 | /* | |
4402 | * Decode the memory-address operand of a vmx instruction, as recorded on an | |
4403 | * exit caused by such an instruction (run by a guest hypervisor). | |
4404 | * On success, returns 0. When the operand is invalid, returns 1 and throws | |
49f933d4 | 4405 | * #UD, #GP, or #SS. |
55d2375e SC |
4406 | */ |
4407 | int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, | |
fdb28619 | 4408 | u32 vmx_instruction_info, bool wr, int len, gva_t *ret) |
55d2375e SC |
4409 | { |
4410 | gva_t off; | |
4411 | bool exn; | |
4412 | struct kvm_segment s; | |
4413 | ||
4414 | /* | |
4415 | * According to Vol. 3B, "Information for VM Exits Due to Instruction | |
4416 | * Execution", on an exit, vmx_instruction_info holds most of the | |
4417 | * addressing components of the operand. Only the displacement part | |
4418 | * is put in exit_qualification (see 3B, "Basic VM-Exit Information"). | |
4419 | * For how an actual address is calculated from all these components, | |
4420 | * refer to Vol. 1, "Operand Addressing". | |
4421 | */ | |
4422 | int scaling = vmx_instruction_info & 3; | |
4423 | int addr_size = (vmx_instruction_info >> 7) & 7; | |
4424 | bool is_reg = vmx_instruction_info & (1u << 10); | |
4425 | int seg_reg = (vmx_instruction_info >> 15) & 7; | |
4426 | int index_reg = (vmx_instruction_info >> 18) & 0xf; | |
4427 | bool index_is_valid = !(vmx_instruction_info & (1u << 22)); | |
4428 | int base_reg = (vmx_instruction_info >> 23) & 0xf; | |
4429 | bool base_is_valid = !(vmx_instruction_info & (1u << 27)); | |
4430 | ||
4431 | if (is_reg) { | |
4432 | kvm_queue_exception(vcpu, UD_VECTOR); | |
4433 | return 1; | |
4434 | } | |
4435 | ||
4436 | /* Addr = segment_base + offset */ | |
4437 | /* offset = base + [index * scale] + displacement */ | |
4438 | off = exit_qualification; /* holds the displacement */ | |
946c522b SC |
4439 | if (addr_size == 1) |
4440 | off = (gva_t)sign_extend64(off, 31); | |
4441 | else if (addr_size == 0) | |
4442 | off = (gva_t)sign_extend64(off, 15); | |
55d2375e SC |
4443 | if (base_is_valid) |
4444 | off += kvm_register_read(vcpu, base_reg); | |
4445 | if (index_is_valid) | |
e6302698 | 4446 | off += kvm_register_read(vcpu, index_reg) << scaling; |
55d2375e | 4447 | vmx_get_segment(vcpu, &s, seg_reg); |
55d2375e | 4448 | |
8570f9e8 SC |
4449 | /* |
4450 | * The effective address, i.e. @off, of a memory operand is truncated | |
4451 | * based on the address size of the instruction. Note that this is | |
4452 | * the *effective address*, i.e. the address prior to accounting for | |
4453 | * the segment's base. | |
4454 | */ | |
55d2375e | 4455 | if (addr_size == 1) /* 32 bit */ |
8570f9e8 SC |
4456 | off &= 0xffffffff; |
4457 | else if (addr_size == 0) /* 16 bit */ | |
4458 | off &= 0xffff; | |
55d2375e SC |
4459 | |
4460 | /* Checks for #GP/#SS exceptions. */ | |
4461 | exn = false; | |
4462 | if (is_long_mode(vcpu)) { | |
8570f9e8 SC |
4463 | /* |
4464 | * The virtual/linear address is never truncated in 64-bit | |
4465 | * mode, e.g. a 32-bit address size can yield a 64-bit virtual | |
4466 | * address when using FS/GS with a non-zero base. | |
4467 | */ | |
6694e480 LA |
4468 | if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS) |
4469 | *ret = s.base + off; | |
4470 | else | |
4471 | *ret = off; | |
8570f9e8 | 4472 | |
55d2375e SC |
4473 | /* Long mode: #GP(0)/#SS(0) if the memory address is in a |
4474 | * non-canonical form. This is the only check on the memory | |
4475 | * destination for long mode! | |
4476 | */ | |
4477 | exn = is_noncanonical_address(*ret, vcpu); | |
e0dfacbf | 4478 | } else { |
8570f9e8 SC |
4479 | /* |
4480 | * When not in long mode, the virtual/linear address is | |
4481 | * unconditionally truncated to 32 bits regardless of the | |
4482 | * address size. | |
4483 | */ | |
4484 | *ret = (s.base + off) & 0xffffffff; | |
4485 | ||
55d2375e SC |
4486 | /* Protected mode: apply checks for segment validity in the |
4487 | * following order: | |
4488 | * - segment type check (#GP(0) may be thrown) | |
4489 | * - usability check (#GP(0)/#SS(0)) | |
4490 | * - limit check (#GP(0)/#SS(0)) | |
4491 | */ | |
4492 | if (wr) | |
4493 | /* #GP(0) if the destination operand is located in a | |
4494 | * read-only data segment or any code segment. | |
4495 | */ | |
4496 | exn = ((s.type & 0xa) == 0 || (s.type & 8)); | |
4497 | else | |
4498 | /* #GP(0) if the source operand is located in an | |
4499 | * execute-only code segment | |
4500 | */ | |
4501 | exn = ((s.type & 0xa) == 8); | |
4502 | if (exn) { | |
4503 | kvm_queue_exception_e(vcpu, GP_VECTOR, 0); | |
4504 | return 1; | |
4505 | } | |
4506 | /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. | |
4507 | */ | |
4508 | exn = (s.unusable != 0); | |
34333cc6 SC |
4509 | |
4510 | /* | |
4511 | * Protected mode: #GP(0)/#SS(0) if the memory operand is | |
4512 | * outside the segment limit. All CPUs that support VMX ignore | |
4513 | * limit checks for flat segments, i.e. segments with base==0, | |
4514 | * limit==0xffffffff and of type expand-up data or code. | |
55d2375e | 4515 | */ |
34333cc6 SC |
4516 | if (!(s.base == 0 && s.limit == 0xffffffff && |
4517 | ((s.type & 8) || !(s.type & 4)))) | |
fdb28619 | 4518 | exn = exn || ((u64)off + len - 1 > s.limit); |
55d2375e SC |
4519 | } |
4520 | if (exn) { | |
4521 | kvm_queue_exception_e(vcpu, | |
4522 | seg_reg == VCPU_SREG_SS ? | |
4523 | SS_VECTOR : GP_VECTOR, | |
4524 | 0); | |
4525 | return 1; | |
4526 | } | |
4527 | ||
4528 | return 0; | |
4529 | } | |
4530 | ||
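/*
 * (Un)advertise the "load IA32_PERF_GLOBAL_CTRL" VM-entry/VM-exit controls
 * to L1, depending on whether the vPMU actually exposes
 * MSR_CORE_PERF_GLOBAL_CTRL to the guest.
 */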
03a8871a OU |
4531 | void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu) |
4532 | { | |
4533 | struct vcpu_vmx *vmx; | |
4534 | ||
4535 | if (!nested_vmx_allowed(vcpu)) | |
4536 | return; | |
4537 | ||
4538 | vmx = to_vmx(vcpu); | |
afaf0b2f | 4539 | if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) { |
03a8871a OU |
4540 | vmx->nested.msrs.entry_ctls_high |= |
4541 | VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; | |
4542 | vmx->nested.msrs.exit_ctls_high |= | |
4543 | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; | |
4544 | } else { | |
4545 | vmx->nested.msrs.entry_ctls_high &= | |
4546 | ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL; | |
4547 | vmx->nested.msrs.exit_ctls_high &= | |
4548 | ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL; | 
4549 | } | |
4550 | } | |
4551 | ||
55d2375e SC |
4552 | static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) |
4553 | { | |
4554 | gva_t gva; | |
4555 | struct x86_exception e; | |
4556 | ||
4557 | if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), | |
fdb28619 EK |
4558 | vmcs_read32(VMX_INSTRUCTION_INFO), false, |
4559 | sizeof(*vmpointer), &gva)) | |
55d2375e SC |
4560 | return 1; |
4561 | ||
4562 | if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) { | |
4563 | kvm_inject_page_fault(vcpu, &e); | |
4564 | return 1; | |
4565 | } | |
4566 | ||
4567 | return 0; | |
4568 | } | |
4569 | ||
4570 | /* | |
4571 | * Allocate a shadow VMCS and associate it with the currently loaded | |
4572 | * VMCS, unless such a shadow VMCS already exists. The newly allocated | |
4573 | * VMCS is also VMCLEARed, so that it is ready for use. | |
4574 | */ | |
4575 | static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu) | |
4576 | { | |
4577 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4578 | struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; | |
4579 | ||
4580 | /* | |
4581 | * We should allocate a shadow vmcs for vmcs01 only when L1 | |
4582 | * executes VMXON and free it when L1 executes VMXOFF. | |
4583 | * As it is invalid to execute VMXON twice, we shouldn't reach | |
4584 | * here when vmcs01 already has an allocated shadow vmcs. | 
4585 | */ | |
4586 | WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); | |
4587 | ||
4588 | if (!loaded_vmcs->shadow_vmcs) { | |
4589 | loaded_vmcs->shadow_vmcs = alloc_vmcs(true); | |
4590 | if (loaded_vmcs->shadow_vmcs) | |
4591 | vmcs_clear(loaded_vmcs->shadow_vmcs); | |
4592 | } | |
4593 | return loaded_vmcs->shadow_vmcs; | |
4594 | } | |
4595 | ||
4596 | static int enter_vmx_operation(struct kvm_vcpu *vcpu) | |
4597 | { | |
4598 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4599 | int r; | |
4600 | ||
4601 | r = alloc_loaded_vmcs(&vmx->nested.vmcs02); | |
4602 | if (r < 0) | |
4603 | goto out_vmcs02; | |
4604 | ||
41836839 | 4605 | vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); |
55d2375e SC |
4606 | if (!vmx->nested.cached_vmcs12) |
4607 | goto out_cached_vmcs12; | |
4608 | ||
41836839 | 4609 | vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); |
55d2375e SC |
4610 | if (!vmx->nested.cached_shadow_vmcs12) |
4611 | goto out_cached_shadow_vmcs12; | |
4612 | ||
4613 | if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu)) | |
4614 | goto out_shadow_vmcs; | |
4615 | ||
4616 | hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, | |
4617 | HRTIMER_MODE_REL_PINNED); | |
4618 | vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; | |
4619 | ||
4620 | vmx->nested.vpid02 = allocate_vpid(); | |
4621 | ||
4622 | vmx->nested.vmcs02_initialized = false; | |
4623 | vmx->nested.vmxon = true; | |
ee85dec2 | 4624 | |
2ef7619d | 4625 | if (vmx_pt_mode_is_host_guest()) { |
ee85dec2 LK |
4626 | vmx->pt_desc.guest.ctl = 0; |
4627 | pt_update_intercept_for_msr(vmx); | |
4628 | } | |
4629 | ||
55d2375e SC |
4630 | return 0; |
4631 | ||
4632 | out_shadow_vmcs: | |
4633 | kfree(vmx->nested.cached_shadow_vmcs12); | |
4634 | ||
4635 | out_cached_shadow_vmcs12: | |
4636 | kfree(vmx->nested.cached_vmcs12); | |
4637 | ||
4638 | out_cached_vmcs12: | |
4639 | free_loaded_vmcs(&vmx->nested.vmcs02); | |
4640 | ||
4641 | out_vmcs02: | |
4642 | return -ENOMEM; | |
4643 | } | |
4644 | ||
4645 | /* | |
4646 | * Emulate the VMXON instruction. | |
4647 | * Currently, we just remember that VMX is active, and do not save or even | |
4648 | * inspect the argument to VMXON (the so-called "VMXON pointer") because we | |
4649 | * do not currently need to store anything in that guest-allocated memory | |
4650 | * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their | 
4651 | * argument is different from the VMXON pointer (which the spec says they do). | 
4652 | */ | |
4653 | static int handle_vmon(struct kvm_vcpu *vcpu) | |
4654 | { | |
4655 | int ret; | |
4656 | gpa_t vmptr; | |
2e408936 | 4657 | uint32_t revision; |
55d2375e | 4658 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
32ad73db SC |
4659 | const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED |
4660 | | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; | |
55d2375e SC |
4661 | |
4662 | /* | |
4663 | * The Intel VMX Instruction Reference lists a bunch of bits that are | |
4664 | * prerequisite to running VMXON, most notably cr4.VMXE must be set to | |
4665 | * 1 (see vmx_set_cr4() for when we allow the guest to set this). | |
4666 | * Otherwise, we should fail with #UD. But most faulting conditions | |
4667 | * have already been checked by hardware, prior to the VM-exit for | |
4668 | * VMXON. We do test guest cr4.VMXE because processor CR4 always has | |
4669 | * that bit set to 1 in non-root mode. | |
4670 | */ | |
4671 | if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) { | |
4672 | kvm_queue_exception(vcpu, UD_VECTOR); | |
4673 | return 1; | |
4674 | } | |
4675 | ||
4676 | /* CPL=0 must be checked manually. */ | |
4677 | if (vmx_get_cpl(vcpu)) { | |
4678 | kvm_inject_gp(vcpu, 0); | |
4679 | return 1; | |
4680 | } | |
4681 | ||
4682 | if (vmx->nested.vmxon) | |
4683 | return nested_vmx_failValid(vcpu, | |
4684 | VMXERR_VMXON_IN_VMX_ROOT_OPERATION); | |
4685 | ||
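/*
 * VMXON also requires IA32_FEATURE_CONTROL to be locked with VMX enabled
 * outside SMX operation; otherwise the instruction #GPs.
 */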
4686 | if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) | |
4687 | != VMXON_NEEDED_FEATURES) { | |
4688 | kvm_inject_gp(vcpu, 0); | |
4689 | return 1; | |
4690 | } | |
4691 | ||
4692 | if (nested_vmx_get_vmptr(vcpu, &vmptr)) | |
4693 | return 1; | |
4694 | ||
4695 | /* | |
4696 | * SDM 3: 24.11.5 | |
4697 | * The first 4 bytes of VMXON region contain the supported | |
4698 | * VMCS revision identifier | |
4699 | * | |
4700 | * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case, | 
4701 | * which would replace the physical address width with 32. | 
4702 | */ | |
e0bf2665 | 4703 | if (!page_address_valid(vcpu, vmptr)) |
55d2375e SC |
4704 | return nested_vmx_failInvalid(vcpu); |
4705 | ||
2e408936 KA |
4706 | if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) || |
4707 | revision != VMCS12_REVISION) | |
55d2375e | 4708 | return nested_vmx_failInvalid(vcpu); |
55d2375e SC |
4709 | |
4710 | vmx->nested.vmxon_ptr = vmptr; | |
4711 | ret = enter_vmx_operation(vcpu); | |
4712 | if (ret) | |
4713 | return ret; | |
4714 | ||
4715 | return nested_vmx_succeed(vcpu); | |
4716 | } | |
4717 | ||
4718 | static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu) | |
4719 | { | |
4720 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4721 | ||
4722 | if (vmx->nested.current_vmptr == -1ull) | |
4723 | return; | |
4724 | ||
7952d769 SC |
4725 | copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu)); |
4726 | ||
55d2375e SC |
4727 | if (enable_shadow_vmcs) { |
4728 | /* copy to memory all shadowed fields in case | |
4729 | they were modified */ | |
4730 | copy_shadow_to_vmcs12(vmx); | |
55d2375e SC |
4731 | vmx_disable_shadow_vmcs(vmx); |
4732 | } | |
4733 | vmx->nested.posted_intr_nv = -1; | |
4734 | ||
4735 | /* Flush VMCS12 to guest memory */ | |
4736 | kvm_vcpu_write_guest_page(vcpu, | |
4737 | vmx->nested.current_vmptr >> PAGE_SHIFT, | |
4738 | vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); | |
4739 | ||
4740 | kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL); | |
4741 | ||
4742 | vmx->nested.current_vmptr = -1ull; | |
4743 | } | |
4744 | ||
4745 | /* Emulate the VMXOFF instruction */ | |
4746 | static int handle_vmoff(struct kvm_vcpu *vcpu) | |
4747 | { | |
4748 | if (!nested_vmx_check_permission(vcpu)) | |
4749 | return 1; | |
4b9852f4 | 4750 | |
55d2375e | 4751 | free_nested(vcpu); |
4b9852f4 LA |
4752 | |
4753 | /* Process a latched INIT during time CPU was in VMX operation */ | |
4754 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
4755 | ||
55d2375e SC |
4756 | return nested_vmx_succeed(vcpu); |
4757 | } | |
4758 | ||
4759 | /* Emulate the VMCLEAR instruction */ | |
4760 | static int handle_vmclear(struct kvm_vcpu *vcpu) | |
4761 | { | |
4762 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4763 | u32 zero = 0; | |
4764 | gpa_t vmptr; | |
11e34914 | 4765 | u64 evmcs_gpa; |
55d2375e SC |
4766 | |
4767 | if (!nested_vmx_check_permission(vcpu)) | |
4768 | return 1; | |
4769 | ||
4770 | if (nested_vmx_get_vmptr(vcpu, &vmptr)) | |
4771 | return 1; | |
4772 | ||
e0bf2665 | 4773 | if (!page_address_valid(vcpu, vmptr)) |
55d2375e SC |
4774 | return nested_vmx_failValid(vcpu, |
4775 | VMXERR_VMCLEAR_INVALID_ADDRESS); | |
4776 | ||
4777 | if (vmptr == vmx->nested.vmxon_ptr) | |
4778 | return nested_vmx_failValid(vcpu, | |
4779 | VMXERR_VMCLEAR_VMXON_POINTER); | |
4780 | ||
11e34914 VK |
4781 | /* |
4782 | * When Enlightened VMEntry is enabled on the calling CPU we treat | |
4783 | * memory area pointed to by vmptr as Enlightened VMCS (as there's no good | 
4784 | * way to distinguish it from VMCS12) and we must not corrupt it by | |
4785 | * writing to the non-existent 'launch_state' field. The area doesn't | |
4786 | * have to be the currently active EVMCS on the calling CPU and there's | |
4787 | * nothing KVM has to do to transition it from 'active' to 'non-active' | |
4788 | * state. It is possible that the area will stay mapped as | |
4789 | * vmx->nested.hv_evmcs but this shouldn't be a problem. | |
4790 | */ | |
4791 | if (likely(!vmx->nested.enlightened_vmcs_enabled || | |
4792 | !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) { | |
55d2375e SC |
4793 | if (vmptr == vmx->nested.current_vmptr) |
4794 | nested_release_vmcs12(vcpu); | |
4795 | ||
4796 | kvm_vcpu_write_guest(vcpu, | |
4797 | vmptr + offsetof(struct vmcs12, | |
4798 | launch_state), | |
4799 | &zero, sizeof(zero)); | |
4800 | } | |
4801 | ||
4802 | return nested_vmx_succeed(vcpu); | |
4803 | } | |
4804 | ||
55d2375e SC |
4805 | /* Emulate the VMLAUNCH instruction */ |
4806 | static int handle_vmlaunch(struct kvm_vcpu *vcpu) | |
4807 | { | |
4808 | return nested_vmx_run(vcpu, true); | |
4809 | } | |
4810 | ||
4811 | /* Emulate the VMRESUME instruction */ | |
4812 | static int handle_vmresume(struct kvm_vcpu *vcpu) | |
4813 | { | |
4814 | ||
4815 | return nested_vmx_run(vcpu, false); | |
4816 | } | |
4817 | ||
4818 | static int handle_vmread(struct kvm_vcpu *vcpu) | |
4819 | { | |
dd2d6042 JM |
4820 | struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) |
4821 | : get_vmcs12(vcpu); | |
55d2375e | 4822 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
c90f4d03 JM |
4823 | u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); |
4824 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
f7eea636 | 4825 | struct x86_exception e; |
c90f4d03 JM |
4826 | unsigned long field; |
4827 | u64 value; | |
4828 | gva_t gva = 0; | |
1c6f0b47 | 4829 | short offset; |
c90f4d03 | 4830 | int len; |
55d2375e SC |
4831 | |
4832 | if (!nested_vmx_check_permission(vcpu)) | |
4833 | return 1; | |
4834 | ||
dd2d6042 JM |
4835 | /* |
4836 | * In VMX non-root operation, when the VMCS-link pointer is -1ull, | |
4837 | * any VMREAD sets the ALU flags for VMfailInvalid. | |
4838 | */ | |
4839 | if (vmx->nested.current_vmptr == -1ull || | |
4840 | (is_guest_mode(vcpu) && | |
4841 | get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) | |
55d2375e SC |
4842 | return nested_vmx_failInvalid(vcpu); |
4843 | ||
55d2375e | 4844 | /* Decode instruction info and find the field to read */ |
c90f4d03 | 4845 | field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); |
1c6f0b47 SC |
4846 | |
4847 | offset = vmcs_field_to_offset(field); | |
4848 | if (offset < 0) | |
55d2375e SC |
4849 | return nested_vmx_failValid(vcpu, |
4850 | VMXERR_UNSUPPORTED_VMCS_COMPONENT); | |
4851 | ||
7952d769 SC |
4852 | if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field)) |
4853 | copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); | |
4854 | ||
c90f4d03 JM |
4855 | /* Read the field, zero-extended to a u64 value */ |
4856 | value = vmcs12_read_any(vmcs12, field, offset); | |
1c6f0b47 | 4857 | |
55d2375e SC |
4858 | /* |
4859 | * Now copy part of this value to register or memory, as requested. | |
4860 | * Note that the number of bits actually copied is 32 or 64 depending | |
4861 | * on the guest's mode (32 or 64 bit), not on the given field's length. | |
4862 | */ | |
c90f4d03 JM |
4863 | if (instr_info & BIT(10)) { |
4864 | kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value); | |
55d2375e | 4865 | } else { |
fdb28619 | 4866 | len = is_64_bit_mode(vcpu) ? 8 : 4; |
55d2375e | 4867 | if (get_vmx_mem_address(vcpu, exit_qualification, |
c90f4d03 | 4868 | instr_info, true, len, &gva)) |
55d2375e SC |
4869 | return 1; |
4870 | /* _system ok, nested_vmx_check_permission has verified cpl=0 */ | |
a4d956b9 | 4871 | if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) { |
f7eea636 | 4872 | kvm_inject_page_fault(vcpu, &e); |
a4d956b9 ML |
4873 | return 1; |
4874 | } | |
55d2375e SC |
4875 | } |
4876 | ||
4877 | return nested_vmx_succeed(vcpu); | |
4878 | } | |
4879 | ||
e2174295 SC |
4880 | static bool is_shadow_field_rw(unsigned long field) |
4881 | { | |
4882 | switch (field) { | |
4883 | #define SHADOW_FIELD_RW(x, y) case x: | |
4884 | #include "vmcs_shadow_fields.h" | |
4885 | return true; | |
4886 | default: | |
4887 | break; | |
4888 | } | |
4889 | return false; | |
4890 | } | |
4891 | ||
4892 | static bool is_shadow_field_ro(unsigned long field) | |
4893 | { | |
4894 | switch (field) { | |
4895 | #define SHADOW_FIELD_RO(x, y) case x: | |
4896 | #include "vmcs_shadow_fields.h" | |
4897 | return true; | |
4898 | default: | |
4899 | break; | |
4900 | } | |
4901 | return false; | |
4902 | } | |
55d2375e SC |
4903 | |
4904 | static int handle_vmwrite(struct kvm_vcpu *vcpu) | |
4905 | { | |
c90f4d03 JM |
4906 | struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu) |
4907 | : get_vmcs12(vcpu); | |
4908 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
4909 | u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); | |
4910 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
4911 | struct x86_exception e; | |
55d2375e | 4912 | unsigned long field; |
c90f4d03 | 4913 | short offset; |
55d2375e | 4914 | gva_t gva; |
c90f4d03 | 4915 | int len; |
55d2375e | 4916 | |
c90f4d03 JM |
4917 | /* |
4918 | * The value to write might be 32 or 64 bits, depending on L1's long | |
55d2375e SC |
4919 | * mode, and eventually we need to write that into a field of several |
4920 | * possible lengths. The code below first zero-extends the value to 64 | |
c90f4d03 | 4921 | * bit (value), and then copies only the appropriate number of |
55d2375e SC |
4922 | * bits into the vmcs12 field. |
4923 | */ | |
c90f4d03 | 4924 | u64 value = 0; |
55d2375e SC |
4925 | |
4926 | if (!nested_vmx_check_permission(vcpu)) | |
4927 | return 1; | |
4928 | ||
dd2d6042 JM |
4929 | /* |
4930 | * In VMX non-root operation, when the VMCS-link pointer is -1ull, | |
4931 | * any VMWRITE sets the ALU flags for VMfailInvalid. | |
4932 | */ | |
4933 | if (vmx->nested.current_vmptr == -1ull || | |
4934 | (is_guest_mode(vcpu) && | |
4935 | get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)) | |
55d2375e SC |
4936 | return nested_vmx_failInvalid(vcpu); |
4937 | ||
c90f4d03 JM |
4938 | if (instr_info & BIT(10)) |
4939 | value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf)); | |
55d2375e | 4940 | else { |
fdb28619 | 4941 | len = is_64_bit_mode(vcpu) ? 8 : 4; |
55d2375e | 4942 | if (get_vmx_mem_address(vcpu, exit_qualification, |
c90f4d03 | 4943 | instr_info, false, len, &gva)) |
55d2375e | 4944 | return 1; |
c90f4d03 | 4945 | if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) { |
55d2375e SC |
4946 | kvm_inject_page_fault(vcpu, &e); |
4947 | return 1; | |
4948 | } | |
4949 | } | |
4950 | ||
c90f4d03 | 4951 | field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); |
693e02cc JM |
4952 | |
4953 | offset = vmcs_field_to_offset(field); | |
4954 | if (offset < 0) | |
4955 | return nested_vmx_failValid(vcpu, | |
4956 | VMXERR_UNSUPPORTED_VMCS_COMPONENT); | |
55d2375e | 4957 | |
55d2375e SC |
4958 | /* |
4959 | * If the vCPU supports "VMWRITE to any supported field in the | |
4960 | * VMCS," then the "read-only" fields are actually read/write. | |
4961 | */ | |
4962 | if (vmcs_field_readonly(field) && | |
4963 | !nested_cpu_has_vmwrite_any_field(vcpu)) | |
4964 | return nested_vmx_failValid(vcpu, | |
4965 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); | |
4966 | ||
dd2d6042 JM |
4967 | /* |
4968 | * Ensure vmcs12 is up-to-date before any VMWRITE that dirties | |
4969 | * vmcs12, else we may crush a field or consume a stale value. | |
4970 | */ | |
4971 | if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) | |
4972 | copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12); | |
55d2375e SC |
4973 | |
4974 | /* | |
b6437805 SC |
4975 | * Some Intel CPUs intentionally drop the reserved bits of the AR byte |
4976 | * fields on VMWRITE. Emulate this behavior to ensure consistent KVM | |
4977 | * behavior regardless of the underlying hardware, e.g. if an AR_BYTE | |
4978 | * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD | |
4979 | * from L1 will return a different value than VMREAD from L2 (L1 sees | |
4980 | * the stripped down value, L2 sees the full value as stored by KVM). | |
55d2375e | 4981 | */ |
b6437805 | 4982 | if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES) |
c90f4d03 | 4983 | value &= 0x1f0ff; |
b6437805 | 4984 | |
c90f4d03 | 4985 | vmcs12_write_any(vmcs12, field, offset, value); |
55d2375e SC |
4986 | |
4987 | /* | |
e2174295 SC |
4988 | * Do not track vmcs12 dirty-state if in guest-mode as we actually |
4989 | * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated | |
4990 | * by L1 without a vmexit are always updated in the vmcs02, i.e. don't | |
4991 | * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path. | |
55d2375e | 4992 | */ |
e2174295 SC |
4993 | if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) { |
4994 | /* | |
4995 | * L1 can read these fields without exiting, ensure the | |
4996 | * shadow VMCS is up-to-date. | |
4997 | */ | |
4998 | if (enable_shadow_vmcs && is_shadow_field_ro(field)) { | |
4999 | preempt_disable(); | |
5000 | vmcs_load(vmx->vmcs01.shadow_vmcs); | |
fadcead0 | 5001 | |
c90f4d03 | 5002 | __vmcs_writel(field, value); |
fadcead0 | 5003 | |
e2174295 SC |
5004 | vmcs_clear(vmx->vmcs01.shadow_vmcs); |
5005 | vmcs_load(vmx->loaded_vmcs->vmcs); | |
5006 | preempt_enable(); | |
55d2375e | 5007 | } |
e2174295 | 5008 | vmx->nested.dirty_vmcs12 = true; |
55d2375e SC |
5009 | } |
5010 | ||
5011 | return nested_vmx_succeed(vcpu); | |
5012 | } | |
5013 | ||
5014 | static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) | |
5015 | { | |
5016 | vmx->nested.current_vmptr = vmptr; | |
5017 | if (enable_shadow_vmcs) { | |
fe7f895d | 5018 | secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); |
55d2375e SC |
5019 | vmcs_write64(VMCS_LINK_POINTER, |
5020 | __pa(vmx->vmcs01.shadow_vmcs)); | |
3731905e | 5021 | vmx->nested.need_vmcs12_to_shadow_sync = true; |
55d2375e SC |
5022 | } |
5023 | vmx->nested.dirty_vmcs12 = true; | |
5024 | } | |
5025 | ||
5026 | /* Emulate the VMPTRLD instruction */ | |
5027 | static int handle_vmptrld(struct kvm_vcpu *vcpu) | |
5028 | { | |
5029 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
5030 | gpa_t vmptr; | |
5031 | ||
5032 | if (!nested_vmx_check_permission(vcpu)) | |
5033 | return 1; | |
5034 | ||
5035 | if (nested_vmx_get_vmptr(vcpu, &vmptr)) | |
5036 | return 1; | |
5037 | ||
e0bf2665 | 5038 | if (!page_address_valid(vcpu, vmptr)) |
55d2375e SC |
5039 | return nested_vmx_failValid(vcpu, |
5040 | VMXERR_VMPTRLD_INVALID_ADDRESS); | |
5041 | ||
5042 | if (vmptr == vmx->nested.vmxon_ptr) | |
5043 | return nested_vmx_failValid(vcpu, | |
5044 | VMXERR_VMPTRLD_VMXON_POINTER); | |
5045 | ||
5046 | /* Forbid normal VMPTRLD if Enlightened version was used */ | |
5047 | if (vmx->nested.hv_evmcs) | |
5048 | return 1; | |
5049 | ||
5050 | if (vmx->nested.current_vmptr != vmptr) { | |
b146b839 | 5051 | struct kvm_host_map map; |
55d2375e | 5052 | struct vmcs12 *new_vmcs12; |
55d2375e | 5053 | |
b146b839 | 5054 | if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { |
55d2375e SC |
5055 | /* |
5056 | * Reads from an unbacked page return all 1s, | |
5057 | * which means that the 32 bits located at the | |
5058 | * given physical address won't match the required | |
5059 | * VMCS12_REVISION identifier. | |
5060 | */ | |
826c1362 | 5061 | return nested_vmx_failValid(vcpu, |
55d2375e | 5062 | VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); |
55d2375e | 5063 | } |
b146b839 KA |
5064 | |
5065 | new_vmcs12 = map.hva; | |
5066 | ||
55d2375e SC |
5067 | if (new_vmcs12->hdr.revision_id != VMCS12_REVISION || |
5068 | (new_vmcs12->hdr.shadow_vmcs && | |
5069 | !nested_cpu_has_vmx_shadow_vmcs(vcpu))) { | |
b146b839 | 5070 | kvm_vcpu_unmap(vcpu, &map, false); |
55d2375e SC |
5071 | return nested_vmx_failValid(vcpu, |
5072 | VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID); | |
5073 | } | |
5074 | ||
5075 | nested_release_vmcs12(vcpu); | |
5076 | ||
5077 | /* | |
5078 | * Load VMCS12 from guest memory since it is not already | |
5079 | * cached. | |
5080 | */ | |
5081 | memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); | |
b146b839 | 5082 | kvm_vcpu_unmap(vcpu, &map, false); |
55d2375e SC |
5083 | |
5084 | set_current_vmptr(vmx, vmptr); | |
5085 | } | |
5086 | ||
5087 | return nested_vmx_succeed(vcpu); | |
5088 | } | |
5089 | ||
5090 | /* Emulate the VMPTRST instruction */ | |
5091 | static int handle_vmptrst(struct kvm_vcpu *vcpu) | |
5092 | { | |
5093 | unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION); | |
5094 | u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO); | |
5095 | gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; | |
5096 | struct x86_exception e; | |
5097 | gva_t gva; | |
5098 | ||
5099 | if (!nested_vmx_check_permission(vcpu)) | |
5100 | return 1; | |
5101 | ||
5102 | if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) | |
5103 | return 1; | |
5104 | ||
fdb28619 EK |
5105 | if (get_vmx_mem_address(vcpu, exit_qual, instr_info, |
5106 | true, sizeof(gpa_t), &gva)) | |
55d2375e SC |
5107 | return 1; |
5108 | /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ | |
5109 | if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr, | |
5110 | sizeof(gpa_t), &e)) { | |
5111 | kvm_inject_page_fault(vcpu, &e); | |
5112 | return 1; | |
5113 | } | |
5114 | return nested_vmx_succeed(vcpu); | |
5115 | } | |
5116 | ||
5117 | /* Emulate the INVEPT instruction */ | |
5118 | static int handle_invept(struct kvm_vcpu *vcpu) | |
5119 | { | |
5120 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
5121 | u32 vmx_instruction_info, types; | |
5122 | unsigned long type; | |
5123 | gva_t gva; | |
5124 | struct x86_exception e; | |
5125 | struct { | |
5126 | u64 eptp, gpa; | |
5127 | } operand; | |
5128 | ||
5129 | if (!(vmx->nested.msrs.secondary_ctls_high & | |
5130 | SECONDARY_EXEC_ENABLE_EPT) || | |
5131 | !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { | |
5132 | kvm_queue_exception(vcpu, UD_VECTOR); | |
5133 | return 1; | |
5134 | } | |
5135 | ||
5136 | if (!nested_vmx_check_permission(vcpu)) | |
5137 | return 1; | |
5138 | ||
5139 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); | |
5140 | type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); | |
5141 | ||
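/*
 * Only the single-context and global INVEPT types are defined; "& 6"
 * keeps just those two type bits as advertised in ept_caps.
 */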
5142 | types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; | |
5143 | ||
5144 | if (type >= 32 || !(types & (1 << type))) | |
5145 | return nested_vmx_failValid(vcpu, | |
5146 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | |
5147 | ||
5148 | /* According to the Intel VMX instruction reference, the memory | |
5149 | * operand is read even if it isn't needed (e.g., for type==global) | |
5150 | */ | |
5151 | if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), | |
fdb28619 | 5152 | vmx_instruction_info, false, sizeof(operand), &gva)) |
55d2375e SC |
5153 | return 1; |
5154 | if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { | |
5155 | kvm_inject_page_fault(vcpu, &e); | |
5156 | return 1; | |
5157 | } | |
5158 | ||
5159 | switch (type) { | |
5160 | case VMX_EPT_EXTENT_GLOBAL: | |
b1190198 | 5161 | case VMX_EPT_EXTENT_CONTEXT: |
55d2375e | 5162 | /* |
b1190198 JM |
5163 | * TODO: Sync the necessary shadow EPT roots here, rather than |
5164 | * at the next emulated VM-entry. | |
55d2375e | 5165 | */ |
55d2375e SC |
5166 | break; |
5167 | default: | |
f9336e32 | 5168 | BUG(); |
55d2375e SC |
5169 | break; |
5170 | } | |
5171 | ||
5172 | return nested_vmx_succeed(vcpu); | |
5173 | } | |
5174 | ||
5175 | static int handle_invvpid(struct kvm_vcpu *vcpu) | |
5176 | { | |
5177 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
5178 | u32 vmx_instruction_info; | |
5179 | unsigned long type, types; | |
5180 | gva_t gva; | |
5181 | struct x86_exception e; | |
5182 | struct { | |
5183 | u64 vpid; | |
5184 | u64 gla; | |
5185 | } operand; | |
5186 | u16 vpid02; | |
5187 | ||
5188 | if (!(vmx->nested.msrs.secondary_ctls_high & | |
5189 | SECONDARY_EXEC_ENABLE_VPID) || | |
5190 | !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { | |
5191 | kvm_queue_exception(vcpu, UD_VECTOR); | |
5192 | return 1; | |
5193 | } | |
5194 | ||
5195 | if (!nested_vmx_check_permission(vcpu)) | |
5196 | return 1; | |
5197 | ||
5198 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); | |
5199 | type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); | |
5200 | ||
5201 | types = (vmx->nested.msrs.vpid_caps & | |
5202 | VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8; | |
5203 | ||
5204 | if (type >= 32 || !(types & (1 << type))) | |
5205 | return nested_vmx_failValid(vcpu, | |
5206 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | |
5207 | ||
5208 | /* According to the Intel VMX instruction reference, the memory | 
5209 | * operand is read even if it isn't needed (e.g., for type==global) | |
5210 | */ | |
5211 | if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), | |
fdb28619 | 5212 | vmx_instruction_info, false, sizeof(operand), &gva)) |
55d2375e SC |
5213 | return 1; |
5214 | if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) { | |
5215 | kvm_inject_page_fault(vcpu, &e); | |
5216 | return 1; | |
5217 | } | |
5218 | if (operand.vpid >> 16) | |
5219 | return nested_vmx_failValid(vcpu, | |
5220 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | |
5221 | ||
5222 | vpid02 = nested_get_vpid02(vcpu); | |
5223 | switch (type) { | |
5224 | case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: | |
5225 | if (!operand.vpid || | |
5226 | is_noncanonical_address(operand.gla, vcpu)) | |
5227 | return nested_vmx_failValid(vcpu, | |
5228 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | |
5229 | if (cpu_has_vmx_invvpid_individual_addr()) { | |
5230 | __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, | |
5231 | vpid02, operand.gla); | |
5232 | } else | |
5233 | __vmx_flush_tlb(vcpu, vpid02, false); | |
5234 | break; | |
5235 | case VMX_VPID_EXTENT_SINGLE_CONTEXT: | |
5236 | case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL: | |
5237 | if (!operand.vpid) | |
5238 | return nested_vmx_failValid(vcpu, | |
5239 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | |
5240 | __vmx_flush_tlb(vcpu, vpid02, false); | |
5241 | break; | |
5242 | case VMX_VPID_EXTENT_ALL_CONTEXT: | |
5243 | __vmx_flush_tlb(vcpu, vpid02, false); | |
5244 | break; | |
5245 | default: | |
5246 | WARN_ON_ONCE(1); | |
5247 | return kvm_skip_emulated_instruction(vcpu); | |
5248 | } | |
5249 | ||
5250 | return nested_vmx_succeed(vcpu); | |
5251 | } | |
5252 | ||
5253 | static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu, | |
5254 | struct vmcs12 *vmcs12) | |
5255 | { | |
2b3eaf81 | 5256 | u32 index = kvm_rcx_read(vcpu); |
ac6389ab | 5257 | u64 new_eptp; |
55d2375e SC |
5258 | bool accessed_dirty; |
5259 | struct kvm_mmu *mmu = vcpu->arch.walk_mmu; | |
5260 | ||
5261 | if (!nested_cpu_has_eptp_switching(vmcs12) || | |
5262 | !nested_cpu_has_ept(vmcs12)) | |
5263 | return 1; | |
5264 | ||
5265 | if (index >= VMFUNC_EPTP_ENTRIES) | |
5266 | return 1; | |
5267 | ||
5268 | ||
5269 | if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT, | |
ac6389ab | 5270 | &new_eptp, index * 8, 8)) |
55d2375e SC |
5271 | return 1; |
5272 | ||
ac6389ab | 5273 | accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT); |
55d2375e SC |
5274 | |
5275 | /* | |
5276 | * If the (L2) guest does a vmfunc to the currently | |
5277 | * active ept pointer, we don't have to do anything else | |
5278 | */ | |
ac6389ab SC |
5279 | if (vmcs12->ept_pointer != new_eptp) { |
5280 | if (!nested_vmx_check_eptp(vcpu, new_eptp)) | |
55d2375e SC |
5281 | return 1; |
5282 | ||
5283 | kvm_mmu_unload(vcpu); | |
5284 | mmu->ept_ad = accessed_dirty; | |
5285 | mmu->mmu_role.base.ad_disabled = !accessed_dirty; | |
ac6389ab | 5286 | vmcs12->ept_pointer = new_eptp; |
55d2375e SC |
5287 | /* |
5288 | * TODO: Check what's the correct approach in case | |
5289 | * mmu reload fails. Currently, we just let the next | |
5290 | * reload potentially fail | |
5291 | */ | |
5292 | kvm_mmu_reload(vcpu); | |
5293 | } | |
5294 | ||
5295 | return 0; | |
5296 | } | |
5297 | ||
5298 | static int handle_vmfunc(struct kvm_vcpu *vcpu) | |
5299 | { | |
5300 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
5301 | struct vmcs12 *vmcs12; | |
2b3eaf81 | 5302 | u32 function = kvm_rax_read(vcpu); |
55d2375e SC |
5303 | |
5304 | /* | |
5305 | * VMFUNC is only supported for nested guests, but we always enable the | |
5306 | * secondary control for simplicity; for non-nested mode, fake that we | |
5307 | * didn't by injecting #UD. | |
5308 | */ | |
5309 | if (!is_guest_mode(vcpu)) { | |
5310 | kvm_queue_exception(vcpu, UD_VECTOR); | |
5311 | return 1; | |
5312 | } | |
5313 | ||
5314 | vmcs12 = get_vmcs12(vcpu); | |
5315 | if ((vmcs12->vm_function_control & (1 << function)) == 0) | |
5316 | goto fail; | |
5317 | ||
5318 | switch (function) { | |
5319 | case 0: | |
5320 | if (nested_vmx_eptp_switching(vcpu, vmcs12)) | |
5321 | goto fail; | |
5322 | break; | |
5323 | default: | |
5324 | goto fail; | |
5325 | } | |
5326 | return kvm_skip_emulated_instruction(vcpu); | |
5327 | ||
5328 | fail: | |
5329 | nested_vmx_vmexit(vcpu, vmx->exit_reason, | |
5330 | vmcs_read32(VM_EXIT_INTR_INFO), | |
5331 | vmcs_readl(EXIT_QUALIFICATION)); | |
5332 | return 1; | |
5333 | } | |
5334 | ||
e71237d3 OU |
5335 | /* |
5336 | * Return true if an IO instruction with the specified port and size should cause | |
5337 | * a VM-exit into L1. | |
5338 | */ | |
5339 | bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port, | |
5340 | int size) | |
55d2375e | 5341 | { |
e71237d3 | 5342 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
55d2375e | 5343 | gpa_t bitmap, last_bitmap; |
55d2375e SC |
5344 | u8 b; |
5345 | ||
55d2375e SC |
5346 | last_bitmap = (gpa_t)-1; |
5347 | b = -1; | |
5348 | ||
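/*
 * io_bitmap_a covers ports 0x0000-0x7fff and io_bitmap_b covers ports
 * 0x8000-0xffff, one bit per port.  A multi-byte access is checked one
 * port at a time, so an access that straddles an intercepted port still
 * causes a VM-exit.
 */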
5349 | while (size > 0) { | |
5350 | if (port < 0x8000) | |
5351 | bitmap = vmcs12->io_bitmap_a; | |
5352 | else if (port < 0x10000) | |
5353 | bitmap = vmcs12->io_bitmap_b; | |
5354 | else | |
5355 | return true; | |
5356 | bitmap += (port & 0x7fff) / 8; | |
5357 | ||
5358 | if (last_bitmap != bitmap) | |
5359 | if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1)) | |
5360 | return true; | |
5361 | if (b & (1 << (port & 7))) | |
5362 | return true; | |
5363 | ||
5364 | port++; | |
5365 | size--; | |
5366 | last_bitmap = bitmap; | |
5367 | } | |
5368 | ||
5369 | return false; | |
5370 | } | |
5371 | ||
e71237d3 OU |
5372 | static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu, |
5373 | struct vmcs12 *vmcs12) | |
5374 | { | |
5375 | unsigned long exit_qualification; | |
35a57134 | 5376 | unsigned short port; |
e71237d3 OU |
5377 | int size; |
5378 | ||
5379 | if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS)) | |
5380 | return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING); | |
5381 | ||
5382 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
5383 | ||
5384 | port = exit_qualification >> 16; | |
5385 | size = (exit_qualification & 7) + 1; | |
5386 | ||
5387 | return nested_vmx_check_io_bitmaps(vcpu, port, size); | |
5388 | } | |
5389 | ||
55d2375e | 5390 | /* |
463bfeee | 5391 | * Return 1 if we should exit from L2 to L1 to handle an MSR access, |
55d2375e SC |
5392 | * rather than handle it ourselves in L0. I.e., check whether L1 expressed |
5393 | * disinterest in the current event (read or write a specific MSR) by using an | |
5394 | * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps. | |
5395 | */ | |
5396 | static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu, | |
5397 | struct vmcs12 *vmcs12, u32 exit_reason) | |
5398 | { | |
2b3eaf81 | 5399 | u32 msr_index = kvm_rcx_read(vcpu); |
55d2375e SC |
5400 | gpa_t bitmap; |
5401 | ||
5402 | if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS)) | |
5403 | return true; | |
5404 | ||
5405 | /* | |
5406 | * The MSR_BITMAP page is divided into four 1024-byte bitmaps, | |
5407 | * for the four combinations of read/write and low/high MSR numbers. | |
5408 | * First we need to figure out which of the four to use: | |
5409 | */ | |
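/*
 * For example, a WRMSR to MSR_EFER (0xc0000080) is looked up in the
 * "write, high" bitmap: +2048 for writes, +1024 for the high MSR range,
 * then bit 0 of byte (0x80 / 8) == 16, i.e. offset 3088 into the page.
 */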
5410 | bitmap = vmcs12->msr_bitmap; | |
5411 | if (exit_reason == EXIT_REASON_MSR_WRITE) | |
5412 | bitmap += 2048; | |
5413 | if (msr_index >= 0xc0000000) { | |
5414 | msr_index -= 0xc0000000; | |
5415 | bitmap += 1024; | |
5416 | } | |
5417 | ||
5418 | /* Then read the msr_index'th bit from this bitmap: */ | |
5419 | if (msr_index < 1024*8) { | |
5420 | unsigned char b; | |
5421 | if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1)) | |
5422 | return true; | |
5423 | return 1 & (b >> (msr_index & 7)); | |
5424 | } else | |
5425 | return true; /* let L1 handle the wrong parameter */ | |
5426 | } | |
5427 | ||
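The quadrant arithmetic above (read vs. write at offset 0/2048, low vs. high MSR range at offset 0/1024) is easy to verify with a small user-space sketch. The helper below is illustrative only; it mirrors the offset computation and is not part of KVM:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns the byte offset into the 4 KiB MSR bitmap, or -1 if the MSR
 * is outside both ranges (in which case the exit always goes to L1). */
static int msr_bitmap_offset(uint32_t msr, bool write)
{
	unsigned int base = write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		base += 1024;
	}
	if (msr >= 1024 * 8)
		return -1;
	return base + msr / 8;
}

int main(void)
{
	/* MSR_EFER = 0xc0000080, a "high" MSR. */
	printf("EFER read  byte: %d\n", msr_bitmap_offset(0xc0000080, false)); /* 1040 */
	printf("EFER write byte: %d\n", msr_bitmap_offset(0xc0000080, true));  /* 3088 */
	/* IA32_TIME_STAMP_COUNTER = 0x10, a "low" MSR. */
	printf("TSC  write byte: %d\n", msr_bitmap_offset(0x10, true));        /* 2050 */
	return 0;
}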
5428 | /* | |
5429 | * Return true if we should exit from L2 to L1 to handle a CR access exit, |
5430 | * rather than handle it ourselves in L0. I.e., check if L1 wanted to | |
5431 | * intercept (via guest_host_mask etc.) the current event. | |
5432 | */ | |
5433 | static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |
5434 | struct vmcs12 *vmcs12) | |
5435 | { | |
5436 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | |
5437 | int cr = exit_qualification & 15; | |
5438 | int reg; | |
5439 | unsigned long val; | |
5440 | ||
5441 | switch ((exit_qualification >> 4) & 3) { | |
5442 | case 0: /* mov to cr */ | |
5443 | reg = (exit_qualification >> 8) & 15; | |
5444 | val = kvm_register_readl(vcpu, reg); | |
5445 | switch (cr) { | |
5446 | case 0: | |
5447 | if (vmcs12->cr0_guest_host_mask & | |
5448 | (val ^ vmcs12->cr0_read_shadow)) | |
5449 | return true; | |
5450 | break; | |
5451 | case 3: | |
5452 | if ((vmcs12->cr3_target_count >= 1 && | |
5453 | vmcs12->cr3_target_value0 == val) || | |
5454 | (vmcs12->cr3_target_count >= 2 && | |
5455 | vmcs12->cr3_target_value1 == val) || | |
5456 | (vmcs12->cr3_target_count >= 3 && | |
5457 | vmcs12->cr3_target_value2 == val) || | |
5458 | (vmcs12->cr3_target_count >= 4 && | |
5459 | vmcs12->cr3_target_value3 == val)) | |
5460 | return false; | |
5461 | if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING)) | |
5462 | return true; | |
5463 | break; | |
5464 | case 4: | |
5465 | if (vmcs12->cr4_guest_host_mask & | |
5466 | (vmcs12->cr4_read_shadow ^ val)) | |
5467 | return true; | |
5468 | break; | |
5469 | case 8: | |
5470 | if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING)) | |
5471 | return true; | |
5472 | break; | |
5473 | } | |
5474 | break; | |
5475 | case 2: /* clts */ | |
5476 | if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) && | |
5477 | (vmcs12->cr0_read_shadow & X86_CR0_TS)) | |
5478 | return true; | |
5479 | break; | |
5480 | case 1: /* mov from cr */ | |
5481 | switch (cr) { | |
5482 | case 3: | |
5483 | if (vmcs12->cpu_based_vm_exec_control & | |
5484 | CPU_BASED_CR3_STORE_EXITING) | |
5485 | return true; | |
5486 | break; | |
5487 | case 8: | |
5488 | if (vmcs12->cpu_based_vm_exec_control & | |
5489 | CPU_BASED_CR8_STORE_EXITING) | |
5490 | return true; | |
5491 | break; | |
5492 | } | |
5493 | break; | |
5494 | case 3: /* lmsw */ | |
5495 | /* | |
5496 | * lmsw can change bits 1..3 of cr0, and only set bit 0 of | |
5497 | * cr0. Other attempted changes are ignored, with no exit. | |
5498 | */ | |
5499 | val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; | |
5500 | if (vmcs12->cr0_guest_host_mask & 0xe & | |
5501 | (val ^ vmcs12->cr0_read_shadow)) | |
5502 | return true; | |
5503 | if ((vmcs12->cr0_guest_host_mask & 0x1) && | |
5504 | !(vmcs12->cr0_read_shadow & 0x1) && | |
5505 | (val & 0x1)) | |
5506 | return true; | |
5507 | break; | |
5508 | } | |
5509 | return false; | |
5510 | } | |
5511 | ||
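For reference, the exit-qualification bit fields consumed by nested_vmx_exit_handled_cr() follow the SDM layout for CR-access exits: CR number in bits 3:0, access type in bits 5:4, GPR in bits 11:8, LMSW source data in bits 31:16. A small illustrative decoder, with made-up struct and function names rather than kernel code:

#include <stdint.h>
#include <stdio.h>

struct cr_exit {
	unsigned int cr;	/* which control register */
	unsigned int type;	/* 0 mov-to, 1 mov-from, 2 clts, 3 lmsw */
	unsigned int reg;	/* GPR operand for mov to/from CR */
	unsigned int lmsw_data;	/* source operand for lmsw */
};

static struct cr_exit decode_cr_exit_qual(uint64_t qual)
{
	struct cr_exit e = {
		.cr        = qual & 15,
		.type      = (qual >> 4) & 3,
		.reg       = (qual >> 8) & 15,
		.lmsw_data = (qual >> 16) & 0xffff,
	};
	return e;
}

int main(void)
{
	/* "mov %rax, %cr4" produces qualification 0x004. */
	struct cr_exit e = decode_cr_exit_qual(0x004);

	printf("cr%u, type %u, reg %u\n", e.cr, e.type, e.reg);
	return 0;
}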
5512 | static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu, | |
5513 | struct vmcs12 *vmcs12, gpa_t bitmap) | |
5514 | { | |
5515 | u32 vmx_instruction_info; | |
5516 | unsigned long field; | |
5517 | u8 b; | |
5518 | ||
5519 | if (!nested_cpu_has_shadow_vmcs(vmcs12)) | |
5520 | return true; | |
5521 | ||
5522 | /* Decode instruction info and find the field to access */ | |
5523 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); | |
5524 | field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); | |
5525 | ||
5526 | /* Out-of-range fields always cause a VM exit from L2 to L1 */ | |
5527 | if (field >> 15) | |
5528 | return true; | |
5529 | ||
5530 | if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1)) | |
5531 | return true; | |
5532 | ||
5533 | return 1 & (b >> (field & 7)); | |
5534 | } | |
5535 | ||
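The VMREAD/VMWRITE bitmap lookup above indexes one bit per field encoding, and any encoding with bits above 14 set exits unconditionally. A user-space sketch of the same indexing (illustrative names, no guest-memory access):

#include <stdint.h>
#include <stdio.h>

static uint8_t vmwrite_bitmap[4096];	/* 4 KiB = one bit per encoding 0..0x7fff */

static int vmcs_access_would_exit(unsigned long field)
{
	if (field >> 15)
		return 1;	/* out of range: always reflect to L1 */
	return (vmwrite_bitmap[field / 8] >> (field & 7)) & 1;
}

int main(void)
{
	unsigned long field = 0x4402;	/* VM_EXIT_REASON encoding */

	vmwrite_bitmap[field / 8] |= 1 << (field & 7);
	printf("VMWRITE 0x%lx: %s\n", field,
	       vmcs_access_would_exit(field) ? "exit" : "no exit");
	printf("VMWRITE 0x8000: %s\n",
	       vmcs_access_would_exit(0x8000) ? "exit" : "no exit");
	return 0;
}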
b045ae90 OU |
5536 | static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12) |
5537 | { | |
5538 | u32 entry_intr_info = vmcs12->vm_entry_intr_info_field; | |
5539 | ||
5540 | if (nested_cpu_has_mtf(vmcs12)) | |
5541 | return true; | |
5542 | ||
5543 | /* | |
5544 | * An MTF VM-exit may be injected into the guest by setting the | |
5545 | * interruption-type to 7 (other event) and the vector field to 0. Such | |
5546 | * is the case regardless of the 'monitor trap flag' VM-execution | |
5547 | * control. | |
5548 | */ | |
5549 | return entry_intr_info == (INTR_INFO_VALID_MASK | |
5550 | | INTR_TYPE_OTHER_EVENT); | |
5551 | } | |
5552 | ||
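The constant tested above decodes per the VM-entry interruption-information format: vector in bits 7:0, type in bits 10:8 (7 is "other event"), valid in bit 31, so an injected MTF event is exactly 0x80000700. A tiny illustrative encoder (made-up names; the deliver-error-code and NMI-unblocking bits are deliberately omitted):

#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_VALID   (1u << 31)
#define INTR_TYPE_SHIFT   8

static uint32_t make_intr_info(unsigned int type, unsigned int vector)
{
	return INTR_INFO_VALID | (type << INTR_TYPE_SHIFT) | vector;
}

int main(void)
{
	printf("injected MTF:              0x%08x\n", make_intr_info(7, 0));  /* 0x80000700 */
	printf("#PF (hw exception, vec14): 0x%08x\n", make_intr_info(3, 14)); /* 0x8000030e */
	return 0;
}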
55d2375e | 5553 | /* |
69c09755 | 5554 | * Return true if we should exit from L2 to L1 to handle an exit, or false if we |
55d2375e SC |
5555 | * should handle it ourselves in L0 (and then continue L2). Only call this |
5556 | * when in is_guest_mode (L2). | |
5557 | */ | |
5558 | bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) | |
5559 | { | |
5560 | u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | |
5561 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
5562 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | |
5563 | ||
96b100cd | 5564 | WARN_ON_ONCE(vmx->nested.nested_run_pending); |
55d2375e SC |
5565 | |
5566 | if (unlikely(vmx->fail)) { | |
380e0055 SC |
5567 | trace_kvm_nested_vmenter_failed( |
5568 | "hardware VM-instruction error: ", | |
5569 | vmcs_read32(VM_INSTRUCTION_ERROR)); | |
55d2375e SC |
5570 | return true; |
5571 | } | |
5572 | ||
55d2375e SC |
5573 | trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason, |
5574 | vmcs_readl(EXIT_QUALIFICATION), | |
5575 | vmx->idt_vectoring_info, | |
5576 | intr_info, | |
5577 | vmcs_read32(VM_EXIT_INTR_ERROR_CODE), | |
5578 | KVM_ISA_VMX); | |
5579 | ||
5580 | switch (exit_reason) { | |
5581 | case EXIT_REASON_EXCEPTION_NMI: | |
5582 | if (is_nmi(intr_info)) | |
5583 | return false; | |
5584 | else if (is_page_fault(intr_info)) | |
5585 | return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; | |
5586 | else if (is_debug(intr_info) && | |
5587 | vcpu->guest_debug & | |
5588 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) | |
5589 | return false; | |
5590 | else if (is_breakpoint(intr_info) && | |
5591 | vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) | |
5592 | return false; | |
5593 | return vmcs12->exception_bitmap & | |
5594 | (1u << (intr_info & INTR_INFO_VECTOR_MASK)); | |
5595 | case EXIT_REASON_EXTERNAL_INTERRUPT: | |
5596 | return false; | |
5597 | case EXIT_REASON_TRIPLE_FAULT: | |
5598 | return true; | |
9dadc2f9 XL |
5599 | case EXIT_REASON_INTERRUPT_WINDOW: |
5600 | return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING); | |
55d2375e | 5601 | case EXIT_REASON_NMI_WINDOW: |
4e2a0bc5 | 5602 | return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING); |
55d2375e SC |
5603 | case EXIT_REASON_TASK_SWITCH: |
5604 | return true; | |
5605 | case EXIT_REASON_CPUID: | |
5606 | return true; | |
5607 | case EXIT_REASON_HLT: | |
5608 | return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING); | |
5609 | case EXIT_REASON_INVD: | |
5610 | return true; | |
5611 | case EXIT_REASON_INVLPG: | |
5612 | return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); | |
5613 | case EXIT_REASON_RDPMC: | |
5614 | return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING); | |
5615 | case EXIT_REASON_RDRAND: | |
5616 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING); | |
5617 | case EXIT_REASON_RDSEED: | |
5618 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING); | |
5619 | case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP: | |
5620 | return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING); | |
5621 | case EXIT_REASON_VMREAD: | |
5622 | return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, | |
5623 | vmcs12->vmread_bitmap); | |
5624 | case EXIT_REASON_VMWRITE: | |
5625 | return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12, | |
5626 | vmcs12->vmwrite_bitmap); | |
5627 | case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR: | |
5628 | case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD: | |
5629 | case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME: | |
5630 | case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: | |
5631 | case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: | |
5632 | /* | |
5633 | * VMX instructions trap unconditionally. This allows L1 to | |
5634 | * emulate them for its L2 guest, i.e., allows 3-level nesting! | |
5635 | */ | |
5636 | return true; | |
5637 | case EXIT_REASON_CR_ACCESS: | |
5638 | return nested_vmx_exit_handled_cr(vcpu, vmcs12); | |
5639 | case EXIT_REASON_DR_ACCESS: | |
5640 | return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING); | |
5641 | case EXIT_REASON_IO_INSTRUCTION: | |
5642 | return nested_vmx_exit_handled_io(vcpu, vmcs12); | |
5643 | case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR: | |
5644 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC); | |
5645 | case EXIT_REASON_MSR_READ: | |
5646 | case EXIT_REASON_MSR_WRITE: | |
5647 | return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason); | |
5648 | case EXIT_REASON_INVALID_STATE: | |
5649 | return true; | |
5650 | case EXIT_REASON_MWAIT_INSTRUCTION: | |
5651 | return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING); | |
5652 | case EXIT_REASON_MONITOR_TRAP_FLAG: | |
b045ae90 | 5653 | return nested_vmx_exit_handled_mtf(vmcs12); |
55d2375e SC |
5654 | case EXIT_REASON_MONITOR_INSTRUCTION: |
5655 | return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING); | |
5656 | case EXIT_REASON_PAUSE_INSTRUCTION: | |
5657 | return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) || | |
5658 | nested_cpu_has2(vmcs12, | |
5659 | SECONDARY_EXEC_PAUSE_LOOP_EXITING); | |
5660 | case EXIT_REASON_MCE_DURING_VMENTRY: | |
5661 | return false; | |
5662 | case EXIT_REASON_TPR_BELOW_THRESHOLD: | |
5663 | return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW); | |
5664 | case EXIT_REASON_APIC_ACCESS: | |
5665 | case EXIT_REASON_APIC_WRITE: | |
5666 | case EXIT_REASON_EOI_INDUCED: | |
5667 | /* | |
5668 | * The controls for "virtualize APIC accesses," "APIC- | |
5669 | * register virtualization," and "virtual-interrupt | |
5670 | * delivery" only come from vmcs12. | |
5671 | */ | |
5672 | return true; | |
5673 | case EXIT_REASON_EPT_VIOLATION: | |
5674 | /* | |
5675 | * L0 always deals with the EPT violation. If nested EPT is | |
5676 | * used, and the nested mmu code discovers that the address is | |
5677 | * missing in the guest EPT table (EPT12), the EPT violation | |
5678 | * will be injected with nested_ept_inject_page_fault() | |
5679 | */ | |
5680 | return false; | |
5681 | case EXIT_REASON_EPT_MISCONFIG: | |
5682 | /* | |
5683 | * L2 never directly uses L1's EPT, but rather L0's own EPT |
5684 | * table (shadow on EPT) or a merged EPT table that L0 built |
5685 | * (EPT on EPT). So any problem with the structure of the |
5686 | * table is L0's fault. |
5687 | */ | |
5688 | return false; | |
5689 | case EXIT_REASON_INVPCID: | |
5690 | return | |
5691 | nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) && | |
5692 | nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING); | |
5693 | case EXIT_REASON_WBINVD: | |
5694 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING); | |
5695 | case EXIT_REASON_XSETBV: | |
5696 | return true; | |
5697 | case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS: | |
5698 | /* | |
5699 | * This should never happen, since it is not possible to | |
5700 | * set XSS to a non-zero value---neither in L1 nor in L2. | |
5701 | * If it were, XSS would have to be checked against |
5702 | * the XSS exit bitmap in vmcs12. | |
5703 | */ | |
5704 | return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES); | |
5705 | case EXIT_REASON_PREEMPTION_TIMER: | |
5706 | return false; | |
5707 | case EXIT_REASON_PML_FULL: | |
5708 | /* We emulate PML support to L1. */ | |
5709 | return false; | |
5710 | case EXIT_REASON_VMFUNC: | |
5711 | /* VM functions are emulated through L2->L0 vmexits. */ | |
5712 | return false; | |
5713 | case EXIT_REASON_ENCLS: | |
5714 | /* SGX is never exposed to L1 */ | |
5715 | return false; | |
bf653b78 TX |
5716 | case EXIT_REASON_UMWAIT: |
5717 | case EXIT_REASON_TPAUSE: | |
5718 | return nested_cpu_has2(vmcs12, | |
5719 | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE); | |
55d2375e SC |
5720 | default: |
5721 | return true; | |
5722 | } | |
5723 | } | |
5724 | ||
5725 | ||
5726 | static int vmx_get_nested_state(struct kvm_vcpu *vcpu, | |
5727 | struct kvm_nested_state __user *user_kvm_nested_state, | |
5728 | u32 user_data_size) | |
5729 | { | |
5730 | struct vcpu_vmx *vmx; | |
5731 | struct vmcs12 *vmcs12; | |
5732 | struct kvm_nested_state kvm_state = { | |
5733 | .flags = 0, | |
6ca00dfa | 5734 | .format = KVM_STATE_NESTED_FORMAT_VMX, |
55d2375e | 5735 | .size = sizeof(kvm_state), |
6ca00dfa LA |
5736 | .hdr.vmx.vmxon_pa = -1ull, |
5737 | .hdr.vmx.vmcs12_pa = -1ull, | |
55d2375e | 5738 | }; |
6ca00dfa LA |
5739 | struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = |
5740 | &user_kvm_nested_state->data.vmx[0]; | |
55d2375e SC |
5741 | |
5742 | if (!vcpu) | |
6ca00dfa | 5743 | return kvm_state.size + sizeof(*user_vmx_nested_state); |
55d2375e SC |
5744 | |
5745 | vmx = to_vmx(vcpu); | |
5746 | vmcs12 = get_vmcs12(vcpu); | |
5747 | ||
55d2375e SC |
5748 | if (nested_vmx_allowed(vcpu) && |
5749 | (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { | |
6ca00dfa LA |
5750 | kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; |
5751 | kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; | |
55d2375e SC |
5752 | |
5753 | if (vmx_has_valid_vmcs12(vcpu)) { | |
6ca00dfa | 5754 | kvm_state.size += sizeof(user_vmx_nested_state->vmcs12); |
55d2375e | 5755 | |
323d73a8 LA |
5756 | if (vmx->nested.hv_evmcs) |
5757 | kvm_state.flags |= KVM_STATE_NESTED_EVMCS; | |
5758 | ||
55d2375e SC |
5759 | if (is_guest_mode(vcpu) && |
5760 | nested_cpu_has_shadow_vmcs(vmcs12) && | |
5761 | vmcs12->vmcs_link_pointer != -1ull) | |
6ca00dfa | 5762 | kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12); |
55d2375e SC |
5763 | } |
5764 | ||
5765 | if (vmx->nested.smm.vmxon) | |
6ca00dfa | 5766 | kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; |
55d2375e SC |
5767 | |
5768 | if (vmx->nested.smm.guest_mode) | |
6ca00dfa | 5769 | kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; |
55d2375e SC |
5770 | |
5771 | if (is_guest_mode(vcpu)) { | |
5772 | kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE; | |
5773 | ||
5774 | if (vmx->nested.nested_run_pending) | |
5775 | kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING; | |
5ef8acbd OU |
5776 | |
5777 | if (vmx->nested.mtf_pending) | |
5778 | kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING; | |
55d2375e SC |
5779 | } |
5780 | } | |
5781 | ||
5782 | if (user_data_size < kvm_state.size) | |
5783 | goto out; | |
5784 | ||
5785 | if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state))) | |
5786 | return -EFAULT; | |
5787 | ||
5788 | if (!vmx_has_valid_vmcs12(vcpu)) | |
5789 | goto out; | |
5790 | ||
5791 | /* | |
5792 | * When running L2, the authoritative vmcs12 state is in the | |
5793 | * vmcs02. When running L1, the authoritative vmcs12 state is | |
5794 | * in the shadow or enlightened vmcs linked to vmcs01, unless | |
3731905e | 5795 | * need_vmcs12_to_shadow_sync is set, in which case, the authoritative |
55d2375e SC |
5796 | * vmcs12 state is in the vmcs12 already. |
5797 | */ | |
5798 | if (is_guest_mode(vcpu)) { | |
3731905e | 5799 | sync_vmcs02_to_vmcs12(vcpu, vmcs12); |
7952d769 | 5800 | sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12); |
3731905e | 5801 | } else if (!vmx->nested.need_vmcs12_to_shadow_sync) { |
55d2375e SC |
5802 | if (vmx->nested.hv_evmcs) |
5803 | copy_enlightened_to_vmcs12(vmx); | |
5804 | else if (enable_shadow_vmcs) | |
5805 | copy_shadow_to_vmcs12(vmx); | |
5806 | } | |
5807 | ||
6ca00dfa LA |
5808 | BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE); |
5809 | BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE); | |
5810 | ||
3a33d030 TR |
5811 | /* |
5812 | * Copy over the full allocated size of vmcs12 rather than just the size | |
5813 | * of the struct. | |
5814 | */ | |
6ca00dfa | 5815 | if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE)) |
55d2375e SC |
5816 | return -EFAULT; |
5817 | ||
5818 | if (nested_cpu_has_shadow_vmcs(vmcs12) && | |
5819 | vmcs12->vmcs_link_pointer != -1ull) { | |
6ca00dfa | 5820 | if (copy_to_user(user_vmx_nested_state->shadow_vmcs12, |
3a33d030 | 5821 | get_shadow_vmcs12(vcpu), VMCS12_SIZE)) |
55d2375e SC |
5822 | return -EFAULT; |
5823 | } | |
5824 | ||
5825 | out: | |
5826 | return kvm_state.size; | |
5827 | } | |
5828 | ||
5829 | /* | |
5830 | * Forcibly leave nested mode in order to be able to reset the VCPU later on. | |
5831 | */ | |
5832 | void vmx_leave_nested(struct kvm_vcpu *vcpu) | |
5833 | { | |
5834 | if (is_guest_mode(vcpu)) { | |
5835 | to_vmx(vcpu)->nested.nested_run_pending = 0; | |
5836 | nested_vmx_vmexit(vcpu, -1, 0, 0); | |
5837 | } | |
5838 | free_nested(vcpu); | |
5839 | } | |
5840 | ||
5841 | static int vmx_set_nested_state(struct kvm_vcpu *vcpu, | |
5842 | struct kvm_nested_state __user *user_kvm_nested_state, | |
5843 | struct kvm_nested_state *kvm_state) | |
5844 | { | |
5845 | struct vcpu_vmx *vmx = to_vmx(vcpu); | |
5846 | struct vmcs12 *vmcs12; | |
5847 | u32 exit_qual; | |
6ca00dfa LA |
5848 | struct kvm_vmx_nested_state_data __user *user_vmx_nested_state = |
5849 | &user_kvm_nested_state->data.vmx[0]; | |
55d2375e SC |
5850 | int ret; |
5851 | ||
6ca00dfa | 5852 | if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX) |
55d2375e SC |
5853 | return -EINVAL; |
5854 | ||
6ca00dfa LA |
5855 | if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { |
5856 | if (kvm_state->hdr.vmx.smm.flags) | |
55d2375e SC |
5857 | return -EINVAL; |
5858 | ||
6ca00dfa | 5859 | if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) |
55d2375e SC |
5860 | return -EINVAL; |
5861 | ||
323d73a8 LA |
5862 | /* |
5863 | * KVM_STATE_NESTED_EVMCS used to signal that KVM should |
5864 | * enable the eVMCS capability on the vCPU. However, the code |
5865 | * has since been changed such that the flag signals that |
5866 | * vmcs12 should be copied into the eVMCS in guest memory. |
5867 | * |
5868 | * To preserve backwards compatibility, allow userspace |
5869 | * to set this flag even when there is no VMXON region. |
5870 | */ | |
9fd58877 PB |
5871 | if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS) |
5872 | return -EINVAL; | |
5873 | } else { | |
5874 | if (!nested_vmx_allowed(vcpu)) | |
5875 | return -EINVAL; | |
55d2375e | 5876 | |
9fd58877 PB |
5877 | if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) |
5878 | return -EINVAL; | |
323d73a8 | 5879 | } |
55d2375e | 5880 | |
6ca00dfa | 5881 | if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && |
55d2375e SC |
5882 | (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) |
5883 | return -EINVAL; | |
5884 | ||
6ca00dfa | 5885 | if (kvm_state->hdr.vmx.smm.flags & |
55d2375e SC |
5886 | ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON)) |
5887 | return -EINVAL; | |
5888 | ||
5889 | /* | |
5890 | * SMM temporarily disables VMX, so we cannot be in guest mode, | |
5891 | * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags | |
5892 | * must be zero. | |
5893 | */ | |
65b712f1 LA |
5894 | if (is_smm(vcpu) ? |
5895 | (kvm_state->flags & | |
5896 | (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING)) | |
5897 | : kvm_state->hdr.vmx.smm.flags) | |
55d2375e SC |
5898 | return -EINVAL; |
5899 | ||
6ca00dfa LA |
5900 | if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && |
5901 | !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) | |
55d2375e SC |
5902 | return -EINVAL; |
5903 | ||
323d73a8 LA |
5904 | if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) && |
5905 | (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) | |
9fd58877 | 5906 | return -EINVAL; |
55d2375e | 5907 | |
323d73a8 | 5908 | vmx_leave_nested(vcpu); |
9fd58877 PB |
5909 | |
5910 | if (kvm_state->hdr.vmx.vmxon_pa == -1ull) | |
5911 | return 0; | |
332d0797 | 5912 | |
6ca00dfa | 5913 | vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; |
55d2375e SC |
5914 | ret = enter_vmx_operation(vcpu); |
5915 | if (ret) | |
5916 | return ret; | |
5917 | ||
5918 | /* Empty 'VMXON' state is permitted */ | |
e8ab8d24 | 5919 | if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) |
55d2375e SC |
5920 | return 0; |
5921 | ||
6ca00dfa LA |
5922 | if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { |
5923 | if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || | |
5924 | !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) | |
55d2375e SC |
5925 | return -EINVAL; |
5926 | ||
6ca00dfa | 5927 | set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); |
55d2375e SC |
5928 | } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) { |
5929 | /* | |
e942dbf8 VK |
5930 | * nested_vmx_handle_enlightened_vmptrld() cannot be called |
5931 | * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be | |
5932 | * restored yet. EVMCS will be mapped from | |
5933 | * nested_get_vmcs12_pages(). | |
55d2375e | 5934 | */ |
e942dbf8 | 5935 | kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu); |
55d2375e SC |
5936 | } else { |
5937 | return -EINVAL; | |
5938 | } | |
5939 | ||
6ca00dfa | 5940 | if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { |
55d2375e SC |
5941 | vmx->nested.smm.vmxon = true; |
5942 | vmx->nested.vmxon = false; | |
5943 | ||
6ca00dfa | 5944 | if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) |
55d2375e SC |
5945 | vmx->nested.smm.guest_mode = true; |
5946 | } | |
5947 | ||
5948 | vmcs12 = get_vmcs12(vcpu); | |
6ca00dfa | 5949 | if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12))) |
55d2375e SC |
5950 | return -EFAULT; |
5951 | ||
5952 | if (vmcs12->hdr.revision_id != VMCS12_REVISION) | |
5953 | return -EINVAL; | |
5954 | ||
5955 | if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) | |
5956 | return 0; | |
5957 | ||
21be4ca1 SC |
5958 | vmx->nested.nested_run_pending = |
5959 | !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING); | |
5960 | ||
5ef8acbd OU |
5961 | vmx->nested.mtf_pending = |
5962 | !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING); | |
5963 | ||
21be4ca1 | 5964 | ret = -EINVAL; |
55d2375e SC |
5965 | if (nested_cpu_has_shadow_vmcs(vmcs12) && |
5966 | vmcs12->vmcs_link_pointer != -1ull) { | |
5967 | struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu); | |
5968 | ||
6ca00dfa LA |
5969 | if (kvm_state->size < |
5970 | sizeof(*kvm_state) + | |
5971 | sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12)) | |
21be4ca1 | 5972 | goto error_guest_mode; |
55d2375e SC |
5973 | |
5974 | if (copy_from_user(shadow_vmcs12, | |
6ca00dfa LA |
5975 | user_vmx_nested_state->shadow_vmcs12, |
5976 | sizeof(*shadow_vmcs12))) { | |
21be4ca1 SC |
5977 | ret = -EFAULT; |
5978 | goto error_guest_mode; | |
5979 | } | |
55d2375e SC |
5980 | |
5981 | if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION || | |
5982 | !shadow_vmcs12->hdr.shadow_vmcs) | |
21be4ca1 | 5983 | goto error_guest_mode; |
55d2375e SC |
5984 | } |
5985 | ||
5478ba34 SC |
5986 | if (nested_vmx_check_controls(vcpu, vmcs12) || |
5987 | nested_vmx_check_host_state(vcpu, vmcs12) || | |
5988 | nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual)) | |
21be4ca1 | 5989 | goto error_guest_mode; |
55d2375e SC |
5990 | |
5991 | vmx->nested.dirty_vmcs12 = true; | |
5992 | ret = nested_vmx_enter_non_root_mode(vcpu, false); | |
21be4ca1 SC |
5993 | if (ret) |
5994 | goto error_guest_mode; | |
55d2375e SC |
5995 | |
5996 | return 0; | |
21be4ca1 SC |
5997 | |
5998 | error_guest_mode: | |
5999 | vmx->nested.nested_run_pending = 0; | |
6000 | return ret; | |
55d2375e SC |
6001 | } |
6002 | ||
1b84292b | 6003 | void nested_vmx_set_vmcs_shadowing_bitmap(void) |
55d2375e SC |
6004 | { |
6005 | if (enable_shadow_vmcs) { | |
55d2375e | 6006 | vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap)); |
fadcead0 | 6007 | vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap)); |
55d2375e SC |
6008 | } |
6009 | } | |
6010 | ||
6011 | /* | |
6012 | * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be | |
6013 | * returned for the various VMX controls MSRs when nested VMX is enabled. | |
6014 | * The same values should also be used to verify that vmcs12 control fields are | |
6015 | * valid during nested entry from L1 to L2. | |
6016 | * Each of these control msrs has a low and high 32-bit half: A low bit is on | |
6017 | * if the corresponding bit in the (32-bit) control field *must* be on, and a | |
6018 | * bit in the high half is on if the corresponding bit in the control field | |
6019 | * may be on. See also vmx_control_verify(). | |
6020 | */ | |
a4443267 | 6021 | void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps) |
55d2375e SC |
6022 | { |
6023 | /* | |
6024 | * Note that as a general rule, the high half of the MSRs (bits in | |
6025 | * the control fields which may be 1) should be initialized by the | |
6026 | * intersection of the underlying hardware's MSR (i.e., features which | |
6027 | * can be supported) and the list of features we want to expose - | |
6028 | * because they are known to be properly supported in our code. | |
6029 | * Also, usually, the low half of the MSRs (bits which must be 1) can | |
6030 | * be set to 0, meaning that L1 may turn off any of these bits. The | |
6031 | * reason is that if one of these bits is necessary, it will appear | |
6032 | * in vmcs01; prepare_vmcs02(), which bitwise-or's the control |
6033 | * fields of vmcs01 and vmcs12, will then turn these bits on in vmcs02 - and |
6034 | * nested_vmx_exit_reflected() will not pass related exits to L1. | |
6035 | * These rules have exceptions below. | |
6036 | */ | |
6037 | ||
6038 | /* pin-based controls */ | |
6039 | rdmsr(MSR_IA32_VMX_PINBASED_CTLS, | |
6040 | msrs->pinbased_ctls_low, | |
6041 | msrs->pinbased_ctls_high); | |
6042 | msrs->pinbased_ctls_low |= | |
6043 | PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; | |
6044 | msrs->pinbased_ctls_high &= | |
6045 | PIN_BASED_EXT_INTR_MASK | | |
6046 | PIN_BASED_NMI_EXITING | | |
6047 | PIN_BASED_VIRTUAL_NMIS | | |
a4443267 | 6048 | (enable_apicv ? PIN_BASED_POSTED_INTR : 0); |
55d2375e SC |
6049 | msrs->pinbased_ctls_high |= |
6050 | PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | | |
6051 | PIN_BASED_VMX_PREEMPTION_TIMER; | |
6052 | ||
6053 | /* exit controls */ | |
6054 | rdmsr(MSR_IA32_VMX_EXIT_CTLS, | |
6055 | msrs->exit_ctls_low, | |
6056 | msrs->exit_ctls_high); | |
6057 | msrs->exit_ctls_low = | |
6058 | VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; | |
6059 | ||
6060 | msrs->exit_ctls_high &= | |
6061 | #ifdef CONFIG_X86_64 | |
6062 | VM_EXIT_HOST_ADDR_SPACE_SIZE | | |
6063 | #endif | |
6064 | VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT; | |
6065 | msrs->exit_ctls_high |= | |
6066 | VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | | |
6067 | VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | | |
6068 | VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; | |
6069 | ||
6070 | /* We support free control of debug control saving. */ | |
6071 | msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS; | |
6072 | ||
6073 | /* entry controls */ | |
6074 | rdmsr(MSR_IA32_VMX_ENTRY_CTLS, | |
6075 | msrs->entry_ctls_low, | |
6076 | msrs->entry_ctls_high); | |
6077 | msrs->entry_ctls_low = | |
6078 | VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; | |
6079 | msrs->entry_ctls_high &= | |
6080 | #ifdef CONFIG_X86_64 | |
6081 | VM_ENTRY_IA32E_MODE | | |
6082 | #endif | |
6083 | VM_ENTRY_LOAD_IA32_PAT; | |
6084 | msrs->entry_ctls_high |= | |
6085 | (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); | |
6086 | ||
6087 | /* We support free control of debug control loading. */ | |
6088 | msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS; | |
6089 | ||
6090 | /* cpu-based controls */ | |
6091 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, | |
6092 | msrs->procbased_ctls_low, | |
6093 | msrs->procbased_ctls_high); | |
6094 | msrs->procbased_ctls_low = | |
6095 | CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; | |
6096 | msrs->procbased_ctls_high &= | |
9dadc2f9 | 6097 | CPU_BASED_INTR_WINDOW_EXITING | |
5e3d394f | 6098 | CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING | |
55d2375e SC |
6099 | CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING | |
6100 | CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING | | |
6101 | CPU_BASED_CR3_STORE_EXITING | | |
6102 | #ifdef CONFIG_X86_64 | |
6103 | CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING | | |
6104 | #endif | |
6105 | CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING | | |
6106 | CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG | | |
6107 | CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING | | |
6108 | CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING | | |
6109 | CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; | |
6110 | /* | |
6111 | * We can allow some features even when not supported by the | |
6112 | * hardware. For example, L1 can specify an MSR bitmap - and we | |
6113 | * can use it to avoid exits to L1 - even when L0 runs L2 | |
6114 | * without MSR bitmaps. | |
6115 | */ | |
6116 | msrs->procbased_ctls_high |= | |
6117 | CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | | |
6118 | CPU_BASED_USE_MSR_BITMAPS; | |
6119 | ||
6120 | /* We support free control of CR3 access interception. */ | |
6121 | msrs->procbased_ctls_low &= | |
6122 | ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); | |
6123 | ||
6124 | /* | |
6125 | * secondary cpu-based controls. Do not include those that | |
6126 | * depend on CPUID bits; they are added later by vmx_cpuid_update(). |
6127 | */ | |
6b1971c6 VK |
6128 | if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) |
6129 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | |
6130 | msrs->secondary_ctls_low, | |
6131 | msrs->secondary_ctls_high); | |
6132 | ||
55d2375e SC |
6133 | msrs->secondary_ctls_low = 0; |
6134 | msrs->secondary_ctls_high &= | |
6135 | SECONDARY_EXEC_DESC | | |
6defc591 | 6136 | SECONDARY_EXEC_RDTSCP | |
55d2375e | 6137 | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | |
6defc591 | 6138 | SECONDARY_EXEC_WBINVD_EXITING | |
55d2375e SC |
6139 | SECONDARY_EXEC_APIC_REGISTER_VIRT | |
6140 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | | |
6defc591 PB |
6141 | SECONDARY_EXEC_RDRAND_EXITING | |
6142 | SECONDARY_EXEC_ENABLE_INVPCID | | |
6143 | SECONDARY_EXEC_RDSEED_EXITING | | |
6144 | SECONDARY_EXEC_XSAVES; | |
55d2375e SC |
6145 | |
6146 | /* | |
6147 | * We can emulate "VMCS shadowing," even if the hardware | |
6148 | * doesn't support it. | |
6149 | */ | |
6150 | msrs->secondary_ctls_high |= | |
6151 | SECONDARY_EXEC_SHADOW_VMCS; | |
6152 | ||
6153 | if (enable_ept) { | |
6154 | /* nested EPT: emulate EPT also to L1 */ | |
6155 | msrs->secondary_ctls_high |= | |
6156 | SECONDARY_EXEC_ENABLE_EPT; | |
bb1fcc70 SC |
6157 | msrs->ept_caps = |
6158 | VMX_EPT_PAGE_WALK_4_BIT | | |
6159 | VMX_EPT_PAGE_WALK_5_BIT | | |
6160 | VMX_EPTP_WB_BIT | | |
96d47010 SC |
6161 | VMX_EPT_INVEPT_BIT | |
6162 | VMX_EPT_EXECUTE_ONLY_BIT; | |
6163 | ||
55d2375e SC |
6164 | msrs->ept_caps &= ept_caps; |
6165 | msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT | | |
6166 | VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT | | |
6167 | VMX_EPT_1GB_PAGE_BIT; | |
6168 | if (enable_ept_ad_bits) { | |
6169 | msrs->secondary_ctls_high |= | |
6170 | SECONDARY_EXEC_ENABLE_PML; | |
6171 | msrs->ept_caps |= VMX_EPT_AD_BIT; | |
6172 | } | |
6173 | } | |
6174 | ||
6175 | if (cpu_has_vmx_vmfunc()) { | |
6176 | msrs->secondary_ctls_high |= | |
6177 | SECONDARY_EXEC_ENABLE_VMFUNC; | |
6178 | /* | |
6179 | * Advertise EPTP switching unconditionally | |
6180 | * since we emulate it | |
6181 | */ | |
6182 | if (enable_ept) | |
6183 | msrs->vmfunc_controls = | |
6184 | VMX_VMFUNC_EPTP_SWITCHING; | |
6185 | } | |
6186 | ||
6187 | /* | |
6188 | * Old versions of KVM use the single-context version without | |
6189 | * checking for support, so declare that it is supported even | |
6190 | * though it is treated as global context. The alternative is | |
6191 | * not failing the single-context invvpid, and it is worse. | |
6192 | */ | |
6193 | if (enable_vpid) { | |
6194 | msrs->secondary_ctls_high |= | |
6195 | SECONDARY_EXEC_ENABLE_VPID; | |
6196 | msrs->vpid_caps = VMX_VPID_INVVPID_BIT | | |
6197 | VMX_VPID_EXTENT_SUPPORTED_MASK; | |
6198 | } | |
6199 | ||
6200 | if (enable_unrestricted_guest) | |
6201 | msrs->secondary_ctls_high |= | |
6202 | SECONDARY_EXEC_UNRESTRICTED_GUEST; | |
6203 | ||
6204 | if (flexpriority_enabled) | |
6205 | msrs->secondary_ctls_high |= | |
6206 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | |
6207 | ||
6208 | /* miscellaneous data */ | |
6209 | rdmsr(MSR_IA32_VMX_MISC, | |
6210 | msrs->misc_low, | |
6211 | msrs->misc_high); | |
6212 | msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA; | |
6213 | msrs->misc_low |= | |
6214 | MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS | | |
6215 | VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE | | |
6216 | VMX_MISC_ACTIVITY_HLT; | |
6217 | msrs->misc_high = 0; | |
6218 | ||
6219 | /* | |
6220 | * This MSR reports some information about VMX support. We | |
6221 | * should return information about the VMX we emulate for the | |
6222 | * guest, and the VMCS structure we give it - not about the | |
6223 | * VMX support of the underlying hardware. | |
6224 | */ | |
6225 | msrs->basic = | |
6226 | VMCS12_REVISION | | |
6227 | VMX_BASIC_TRUE_CTLS | | |
6228 | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | | |
6229 | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); | |
6230 | ||
6231 | if (cpu_has_vmx_basic_inout()) | |
6232 | msrs->basic |= VMX_BASIC_INOUT; | |
6233 | ||
6234 | /* | |
6235 | * These MSRs specify bits which the guest must keep fixed on | |
6236 | * while L1 is in VMXON mode (in L1's root mode, or running an L2). | |
6237 | * We picked the standard core2 setting. | |
6238 | */ | |
6239 | #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE) | |
6240 | #define VMXON_CR4_ALWAYSON X86_CR4_VMXE | |
6241 | msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON; | |
6242 | msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON; | |
6243 | ||
6244 | /* These MSRs specify bits which the guest must keep fixed off. */ | |
6245 | rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1); | |
6246 | rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1); | |
6247 | ||
6248 | /* highest index: VMX_PREEMPTION_TIMER_VALUE */ | |
6249 | msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1; | |
6250 | } | |
6251 | ||
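The low/high halves described at the top of nested_vmx_setup_ctls_msrs() are consumed as allowed-0/allowed-1 masks when a vmcs12 control field is validated: every bit set in the low half must be set in the control, and no bit clear in the high half may be set. A minimal sketch of that check, with assumed helper names rather than KVM's own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* low  = bits that must be 1 in the control field
 * high = bits that may  be 1 in the control field */
static bool control_allowed(uint32_t control, uint32_t low, uint32_t high)
{
	return (control & low) == low && (control & ~high) == 0;
}

int main(void)
{
	/* Suppose bit 3 must be 1 and only bits 3, 7 and 9 may be 1. */
	uint32_t low  = 1u << 3;
	uint32_t high = (1u << 3) | (1u << 7) | (1u << 9);

	printf("%d\n", control_allowed((1u << 3) | (1u << 7), low, high)); /* 1: ok */
	printf("%d\n", control_allowed(1u << 7, low, high));               /* 0: required bit clear */
	printf("%d\n", control_allowed((1u << 3) | (1u << 5), low, high)); /* 0: disallowed bit set */
	return 0;
}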
6252 | void nested_vmx_hardware_unsetup(void) | |
6253 | { | |
6254 | int i; | |
6255 | ||
6256 | if (enable_shadow_vmcs) { | |
6257 | for (i = 0; i < VMX_BITMAP_NR; i++) | |
6258 | free_page((unsigned long)vmx_bitmap[i]); | |
6259 | } | |
6260 | } | |
6261 | ||
72b0eaa9 SC |
6262 | __init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops, |
6263 | int (*exit_handlers[])(struct kvm_vcpu *)) | |
55d2375e SC |
6264 | { |
6265 | int i; | |
6266 | ||
6267 | if (!cpu_has_vmx_shadow_vmcs()) | |
6268 | enable_shadow_vmcs = 0; | |
6269 | if (enable_shadow_vmcs) { | |
6270 | for (i = 0; i < VMX_BITMAP_NR; i++) { | |
41836839 BG |
6271 | /* |
6272 | * The vmx_bitmap is not tied to a VM and so should | |
6273 | * not be charged to a memcg. | |
6274 | */ | |
55d2375e SC |
6275 | vmx_bitmap[i] = (unsigned long *) |
6276 | __get_free_page(GFP_KERNEL); | |
6277 | if (!vmx_bitmap[i]) { | |
6278 | nested_vmx_hardware_unsetup(); | |
6279 | return -ENOMEM; | |
6280 | } | |
6281 | } | |
6282 | ||
6283 | init_vmcs_shadow_fields(); | |
6284 | } | |
6285 | ||
cc877670 LA |
6286 | exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear; |
6287 | exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch; | |
6288 | exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld; | |
6289 | exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst; | |
6290 | exit_handlers[EXIT_REASON_VMREAD] = handle_vmread; | |
6291 | exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume; | |
6292 | exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite; | |
6293 | exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff; | |
6294 | exit_handlers[EXIT_REASON_VMON] = handle_vmon; | |
6295 | exit_handlers[EXIT_REASON_INVEPT] = handle_invept; | |
6296 | exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid; | |
6297 | exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc; | |
55d2375e | 6298 | |
72b0eaa9 SC |
6299 | ops->check_nested_events = vmx_check_nested_events; |
6300 | ops->get_nested_state = vmx_get_nested_state; | |
6301 | ops->set_nested_state = vmx_set_nested_state; | |
6302 | ops->get_vmcs12_pages = nested_get_vmcs12_pages; | |
6303 | ops->nested_enable_evmcs = nested_enable_evmcs; | |
6304 | ops->nested_get_evmcs_version = nested_get_evmcs_version; | |
55d2375e SC |
6305 | |
6306 | return 0; | |
6307 | } |