// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>
static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);
#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})
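/*
 * Typical use of CC() later in this file, e.g.
 *	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)))
 *		return -EINVAL;
 * traces the stringified consistency check when it fails, without
 * cluttering every call site with explicit tracing.
 */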
/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}
/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
	return kvm_skip_emulated_instruction(vcpu);
}
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}
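/*
 * The VMX capability MSR layout packs the allowed-0 settings (bits that must
 * be 1) in the low 32 bits and the allowed-1 settings in the high 32 bits,
 * so e.g. vmx_control_msr(0x00000016, 0xfff9fffe) yields the 64-bit value
 * 0xfff9fffe00000016.
 */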
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = 0;
	vmx->nested.hv_evmcs = NULL;
}
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_segment_cache_clear(vmx);
}
/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
				to_vmx(vcpu)->nested.msrs.ept_caps &
				VMX_EPT_EXECUTE_ONLY_BIT,
				nested_ept_ad_enabled(vcpu),
				nested_ept_get_eptp(vcpu));
	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}
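/*
 * Per the SDM, a #PF in L2 is reflected to L1 iff the masked error code
 * matches PFEC_MATCH and PF is set in the exception bitmap, or the masked
 * error code does not match and PF is clear; both cases collapse to
 * "inequality XOR bit".  E.g. bit = 1 with a matching error code
 * (inequality = 0) gives 0 ^ 1 = 1, i.e. deliver the fault to L1.
 */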
/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}
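/*
 * E.g. with a guest MAXPHYADDR of 36, gpa 0x1000 is accepted, 0x1001 fails
 * the page-alignment check, and 0x1000000000 (bit 36) fails the width check
 * because gpa >> 36 is non-zero.
 */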
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}
/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}
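/*
 * Bitmap layout used above and below: read-low MSRs (0x0 - 0x1fff) at byte
 * offset 0x000, read-high MSRs (0xc0000000 - 0xc0001fff) at 0x400, write-low
 * at 0x800 and write-high at 0xc00, each quarter holding one bit per MSR.
 */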
/*
 * If a msr is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
	}
}
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}
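/*
 * The x2APIC range 0x800 - 0x8ff is 256 MSRs, i.e. 256 bits in the read-low
 * quarter and 256 bits in the write-low quarter (+0x800 bytes); on a 64-bit
 * host the loop above therefore performs the eight 8-byte writes mentioned
 * in nested_vmx_prepare_msr_bitmap() below.
 */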
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessarily merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}
/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (CC(!nested_cpu_has_vid(vmcs12)) ||
	    CC(!nested_exit_intr_ack_set(vcpu)) ||
	    CC((vmcs12->posted_intr_nv & 0xff00)) ||
	    CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
	    CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						       struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}
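/*
 * vmx_misc_max_msr() extracts bits 27:25 of IA32_VMX_MISC, which encode the
 * recommended maximum number of MSR-list entries as (N + 1) * 512; e.g. a
 * field value of 0 yields (0 + 1) * 512 = 512 entries per list.
 */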
890 * Load guest's/host's msr at nested entry/exit.
891 * return 0 for success, entry index for failure.
893 * One of the failure modes for MSR load/store is when a list exceeds the
894 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch
895 * as possible, process all valid entries before failing rather than precheck
896 * for a capacity violation.
898 static u32
nested_vmx_load_msr(struct kvm_vcpu
*vcpu
, u64 gpa
, u32 count
)
901 struct vmx_msr_entry e
;
902 u32 max_msr_list_size
= nested_vmx_max_atomic_switch_msrs(vcpu
);
904 for (i
= 0; i
< count
; i
++) {
905 if (unlikely(i
>= max_msr_list_size
))
908 if (kvm_vcpu_read_guest(vcpu
, gpa
+ i
* sizeof(e
),
910 pr_debug_ratelimited(
911 "%s cannot read MSR entry (%u, 0x%08llx)\n",
912 __func__
, i
, gpa
+ i
* sizeof(e
));
915 if (nested_vmx_load_msr_check(vcpu
, &e
)) {
916 pr_debug_ratelimited(
917 "%s check failed (%u, 0x%x, 0x%x)\n",
918 __func__
, i
, e
.index
, e
.reserved
);
921 if (kvm_set_msr(vcpu
, e
.index
, e
.value
)) {
922 pr_debug_ratelimited(
923 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
924 __func__
, i
, e
.index
, e
.value
);
933 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu
*vcpu
,
937 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
940 * If the L0 hypervisor stored a more accurate value for the TSC that
941 * does not include the time taken for emulation of the L2->L1
942 * VM-exit in L0, use the more accurate value.
944 if (msr_index
== MSR_IA32_TSC
) {
945 int index
= vmx_find_msr_index(&vmx
->msr_autostore
.guest
,
949 u64 val
= vmx
->msr_autostore
.guest
.val
[index
].value
;
951 *data
= kvm_read_l1_tsc(vcpu
, val
);
956 if (kvm_get_msr(vcpu
, msr_index
, data
)) {
957 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__
,
964 static bool read_and_check_msr_entry(struct kvm_vcpu
*vcpu
, u64 gpa
, int i
,
965 struct vmx_msr_entry
*e
)
967 if (kvm_vcpu_read_guest(vcpu
,
968 gpa
+ i
* sizeof(*e
),
969 e
, 2 * sizeof(u32
))) {
970 pr_debug_ratelimited(
971 "%s cannot read MSR entry (%u, 0x%08llx)\n",
972 __func__
, i
, gpa
+ i
* sizeof(*e
));
975 if (nested_vmx_store_msr_check(vcpu
, e
)) {
976 pr_debug_ratelimited(
977 "%s check failed (%u, 0x%x, 0x%x)\n",
978 __func__
, i
, e
->index
, e
->reserved
);
984 static int nested_vmx_store_msr(struct kvm_vcpu
*vcpu
, u64 gpa
, u32 count
)
988 struct vmx_msr_entry e
;
989 u32 max_msr_list_size
= nested_vmx_max_atomic_switch_msrs(vcpu
);
991 for (i
= 0; i
< count
; i
++) {
992 if (unlikely(i
>= max_msr_list_size
))
995 if (!read_and_check_msr_entry(vcpu
, gpa
, i
, &e
))
998 if (!nested_vmx_get_vmexit_msr_value(vcpu
, e
.index
, &data
))
1001 if (kvm_vcpu_write_guest(vcpu
,
1002 gpa
+ i
* sizeof(e
) +
1003 offsetof(struct vmx_msr_entry
, value
),
1004 &data
, sizeof(data
))) {
1005 pr_debug_ratelimited(
1006 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1007 __func__
, i
, e
.index
, data
);
static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 count = vmcs12->vm_exit_msr_store_count;
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < count; i++) {
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;

		if (e.index == msr_index)
			return true;
	}
	return false;
}

static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
					   u32 msr_index)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
	bool in_vmcs12_store_list;
	int msr_autostore_index;
	bool in_autostore_list;
	int last;

	msr_autostore_index = vmx_find_msr_index(autostore, msr_index);
	in_autostore_list = msr_autostore_index >= 0;
	in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);

	if (in_vmcs12_store_list && !in_autostore_list) {
		if (autostore->nr == NR_LOADSTORE_MSRS) {
			/*
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
			pr_warn_ratelimited(
				"Not enough msr entries in msr_autostore. Can't add msr %x\n",
				msr_index);
			return;
		}
		last = autostore->nr++;
		autostore->val[last].index = msr_index;
	} else if (!in_vmcs12_store_list && in_autostore_list) {
		last = --autostore->nr;
		autostore->val[msr_autostore_index] = autostore->val[last];
	}
}
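/*
 * Removal above is swap-with-last: the final entry overwrites the vacated
 * slot and autostore->nr is decremented, so the vmcs02 MSR-store list stays
 * dense without shifting the remaining entries.
 */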
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}
/*
 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
 * @entry_failure_code.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (CC(!nested_cr3_valid(vcpu, cr3))) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return -EINVAL;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (is_pae_paging(vcpu) && !nested_ept) {
			if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return -EINVAL;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	kvm_init_mmu(vcpu, false);

	return 0;
}
/*
 * Returns if KVM is able to config CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L0 uses EPT, L1 and L2 run with different EPTP because
 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
 * are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return enable_ept ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}
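/*
 * Example: is_bitwise_subset(0xb, 0x3, -1ULL) is true since every bit set in
 * 0x3 is also set in 0xb, while is_bitwise_subset(0xb, 0x5, -1ULL) is false
 * because bit 2 is set only in the subset candidate.
 */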
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}
1181 vmx_restore_control_msr(struct vcpu_vmx
*vmx
, u32 msr_index
, u64 data
)
1186 switch (msr_index
) {
1187 case MSR_IA32_VMX_TRUE_PINBASED_CTLS
:
1188 lowp
= &vmx
->nested
.msrs
.pinbased_ctls_low
;
1189 highp
= &vmx
->nested
.msrs
.pinbased_ctls_high
;
1191 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS
:
1192 lowp
= &vmx
->nested
.msrs
.procbased_ctls_low
;
1193 highp
= &vmx
->nested
.msrs
.procbased_ctls_high
;
1195 case MSR_IA32_VMX_TRUE_EXIT_CTLS
:
1196 lowp
= &vmx
->nested
.msrs
.exit_ctls_low
;
1197 highp
= &vmx
->nested
.msrs
.exit_ctls_high
;
1199 case MSR_IA32_VMX_TRUE_ENTRY_CTLS
:
1200 lowp
= &vmx
->nested
.msrs
.entry_ctls_low
;
1201 highp
= &vmx
->nested
.msrs
.entry_ctls_high
;
1203 case MSR_IA32_VMX_PROCBASED_CTLS2
:
1204 lowp
= &vmx
->nested
.msrs
.secondary_ctls_low
;
1205 highp
= &vmx
->nested
.msrs
.secondary_ctls_high
;
1211 supported
= vmx_control_msr(*lowp
, *highp
);
1213 /* Check must-be-1 bits are still 1. */
1214 if (!is_bitwise_subset(data
, supported
, GENMASK_ULL(31, 0)))
1217 /* Check must-be-0 bits are still 0. */
1218 if (!is_bitwise_subset(supported
, data
, GENMASK_ULL(63, 32)))
1222 *highp
= data
>> 32;
1226 static int vmx_restore_vmx_misc(struct vcpu_vmx
*vmx
, u64 data
)
1228 const u64 feature_and_reserved_bits
=
1230 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1231 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1233 GENMASK_ULL(13, 9) | BIT_ULL(31);
1236 vmx_misc
= vmx_control_msr(vmx
->nested
.msrs
.misc_low
,
1237 vmx
->nested
.msrs
.misc_high
);
1239 if (!is_bitwise_subset(vmx_misc
, data
, feature_and_reserved_bits
))
1242 if ((vmx
->nested
.msrs
.pinbased_ctls_high
&
1243 PIN_BASED_VMX_PREEMPTION_TIMER
) &&
1244 vmx_misc_preemption_timer_rate(data
) !=
1245 vmx_misc_preemption_timer_rate(vmx_misc
))
1248 if (vmx_misc_cr3_count(data
) > vmx_misc_cr3_count(vmx_misc
))
1251 if (vmx_misc_max_msr(data
) > vmx_misc_max_msr(vmx_misc
))
1254 if (vmx_misc_mseg_revid(data
) != vmx_misc_mseg_revid(vmx_misc
))
1257 vmx
->nested
.msrs
.misc_low
= data
;
1258 vmx
->nested
.msrs
.misc_high
= data
>> 32;
1263 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx
*vmx
, u64 data
)
1265 u64 vmx_ept_vpid_cap
;
1267 vmx_ept_vpid_cap
= vmx_control_msr(vmx
->nested
.msrs
.ept_caps
,
1268 vmx
->nested
.msrs
.vpid_caps
);
1270 /* Every bit is either reserved or a feature bit. */
1271 if (!is_bitwise_subset(vmx_ept_vpid_cap
, data
, -1ULL))
1274 vmx
->nested
.msrs
.ept_caps
= data
;
1275 vmx
->nested
.msrs
.vpid_caps
= data
>> 32;
1279 static int vmx_restore_fixed0_msr(struct vcpu_vmx
*vmx
, u32 msr_index
, u64 data
)
1283 switch (msr_index
) {
1284 case MSR_IA32_VMX_CR0_FIXED0
:
1285 msr
= &vmx
->nested
.msrs
.cr0_fixed0
;
1287 case MSR_IA32_VMX_CR4_FIXED0
:
1288 msr
= &vmx
->nested
.msrs
.cr4_fixed0
;
1295 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
1296 * must be 1 in the restored value.
1298 if (!is_bitwise_subset(data
, *msr
, -1ULL))
1306 * Called when userspace is restoring VMX MSRs.
1308 * Returns 0 on success, non-0 otherwise.
1310 int vmx_set_vmx_msr(struct kvm_vcpu
*vcpu
, u32 msr_index
, u64 data
)
1312 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1315 * Don't allow changes to the VMX capability MSRs while the vCPU
1316 * is in VMX operation.
1318 if (vmx
->nested
.vmxon
)
1321 switch (msr_index
) {
1322 case MSR_IA32_VMX_BASIC
:
1323 return vmx_restore_vmx_basic(vmx
, data
);
1324 case MSR_IA32_VMX_PINBASED_CTLS
:
1325 case MSR_IA32_VMX_PROCBASED_CTLS
:
1326 case MSR_IA32_VMX_EXIT_CTLS
:
1327 case MSR_IA32_VMX_ENTRY_CTLS
:
1329 * The "non-true" VMX capability MSRs are generated from the
1330 * "true" MSRs, so we do not support restoring them directly.
1332 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1333 * should restore the "true" MSRs with the must-be-1 bits
1334 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1335 * DEFAULT SETTINGS".
1338 case MSR_IA32_VMX_TRUE_PINBASED_CTLS
:
1339 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS
:
1340 case MSR_IA32_VMX_TRUE_EXIT_CTLS
:
1341 case MSR_IA32_VMX_TRUE_ENTRY_CTLS
:
1342 case MSR_IA32_VMX_PROCBASED_CTLS2
:
1343 return vmx_restore_control_msr(vmx
, msr_index
, data
);
1344 case MSR_IA32_VMX_MISC
:
1345 return vmx_restore_vmx_misc(vmx
, data
);
1346 case MSR_IA32_VMX_CR0_FIXED0
:
1347 case MSR_IA32_VMX_CR4_FIXED0
:
1348 return vmx_restore_fixed0_msr(vmx
, msr_index
, data
);
1349 case MSR_IA32_VMX_CR0_FIXED1
:
1350 case MSR_IA32_VMX_CR4_FIXED1
:
1352 * These MSRs are generated based on the vCPU's CPUID, so we
1353 * do not support restoring them directly.
1356 case MSR_IA32_VMX_EPT_VPID_CAP
:
1357 return vmx_restore_vmx_ept_vpid_cap(vmx
, data
);
1358 case MSR_IA32_VMX_VMCS_ENUM
:
1359 vmx
->nested
.msrs
.vmcs_enum
= data
;
1361 case MSR_IA32_VMX_VMFUNC
:
1362 if (data
& ~vmx
->nested
.msrs
.vmfunc_controls
)
1364 vmx
->nested
.msrs
.vmfunc_controls
= data
;
1368 * The rest of the VMX capability MSRs do not support restore.
1374 /* Returns 0 on success, non-0 otherwise. */
1375 int vmx_get_vmx_msr(struct nested_vmx_msrs
*msrs
, u32 msr_index
, u64
*pdata
)
1377 switch (msr_index
) {
1378 case MSR_IA32_VMX_BASIC
:
1379 *pdata
= msrs
->basic
;
1381 case MSR_IA32_VMX_TRUE_PINBASED_CTLS
:
1382 case MSR_IA32_VMX_PINBASED_CTLS
:
1383 *pdata
= vmx_control_msr(
1384 msrs
->pinbased_ctls_low
,
1385 msrs
->pinbased_ctls_high
);
1386 if (msr_index
== MSR_IA32_VMX_PINBASED_CTLS
)
1387 *pdata
|= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR
;
1389 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS
:
1390 case MSR_IA32_VMX_PROCBASED_CTLS
:
1391 *pdata
= vmx_control_msr(
1392 msrs
->procbased_ctls_low
,
1393 msrs
->procbased_ctls_high
);
1394 if (msr_index
== MSR_IA32_VMX_PROCBASED_CTLS
)
1395 *pdata
|= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR
;
1397 case MSR_IA32_VMX_TRUE_EXIT_CTLS
:
1398 case MSR_IA32_VMX_EXIT_CTLS
:
1399 *pdata
= vmx_control_msr(
1400 msrs
->exit_ctls_low
,
1401 msrs
->exit_ctls_high
);
1402 if (msr_index
== MSR_IA32_VMX_EXIT_CTLS
)
1403 *pdata
|= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR
;
1405 case MSR_IA32_VMX_TRUE_ENTRY_CTLS
:
1406 case MSR_IA32_VMX_ENTRY_CTLS
:
1407 *pdata
= vmx_control_msr(
1408 msrs
->entry_ctls_low
,
1409 msrs
->entry_ctls_high
);
1410 if (msr_index
== MSR_IA32_VMX_ENTRY_CTLS
)
1411 *pdata
|= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR
;
1413 case MSR_IA32_VMX_MISC
:
1414 *pdata
= vmx_control_msr(
1418 case MSR_IA32_VMX_CR0_FIXED0
:
1419 *pdata
= msrs
->cr0_fixed0
;
1421 case MSR_IA32_VMX_CR0_FIXED1
:
1422 *pdata
= msrs
->cr0_fixed1
;
1424 case MSR_IA32_VMX_CR4_FIXED0
:
1425 *pdata
= msrs
->cr4_fixed0
;
1427 case MSR_IA32_VMX_CR4_FIXED1
:
1428 *pdata
= msrs
->cr4_fixed1
;
1430 case MSR_IA32_VMX_VMCS_ENUM
:
1431 *pdata
= msrs
->vmcs_enum
;
1433 case MSR_IA32_VMX_PROCBASED_CTLS2
:
1434 *pdata
= vmx_control_msr(
1435 msrs
->secondary_ctls_low
,
1436 msrs
->secondary_ctls_high
);
1438 case MSR_IA32_VMX_EPT_VPID_CAP
:
1439 *pdata
= msrs
->ept_caps
|
1440 ((u64
)msrs
->vpid_caps
<< 32);
1442 case MSR_IA32_VMX_VMFUNC
:
1443 *pdata
= msrs
->vmfunc_controls
;
1453 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1454 * been modified by the L1 guest. Note, "writable" in this context means
1455 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1456 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1457 * VM-exit information fields (which are actually writable if the vCPU is
1458 * configured to support "VMWRITE to any supported field in the VMCS").
1460 static void copy_shadow_to_vmcs12(struct vcpu_vmx
*vmx
)
1462 struct vmcs
*shadow_vmcs
= vmx
->vmcs01
.shadow_vmcs
;
1463 struct vmcs12
*vmcs12
= get_vmcs12(&vmx
->vcpu
);
1464 struct shadow_vmcs_field field
;
1468 if (WARN_ON(!shadow_vmcs
))
1473 vmcs_load(shadow_vmcs
);
1475 for (i
= 0; i
< max_shadow_read_write_fields
; i
++) {
1476 field
= shadow_read_write_fields
[i
];
1477 val
= __vmcs_readl(field
.encoding
);
1478 vmcs12_write_any(vmcs12
, field
.encoding
, field
.offset
, val
);
1481 vmcs_clear(shadow_vmcs
);
1482 vmcs_load(vmx
->loaded_vmcs
->vmcs
);
1487 static void copy_vmcs12_to_shadow(struct vcpu_vmx
*vmx
)
1489 const struct shadow_vmcs_field
*fields
[] = {
1490 shadow_read_write_fields
,
1491 shadow_read_only_fields
1493 const int max_fields
[] = {
1494 max_shadow_read_write_fields
,
1495 max_shadow_read_only_fields
1497 struct vmcs
*shadow_vmcs
= vmx
->vmcs01
.shadow_vmcs
;
1498 struct vmcs12
*vmcs12
= get_vmcs12(&vmx
->vcpu
);
1499 struct shadow_vmcs_field field
;
1503 if (WARN_ON(!shadow_vmcs
))
1506 vmcs_load(shadow_vmcs
);
1508 for (q
= 0; q
< ARRAY_SIZE(fields
); q
++) {
1509 for (i
= 0; i
< max_fields
[q
]; i
++) {
1510 field
= fields
[q
][i
];
1511 val
= vmcs12_read_any(vmcs12
, field
.encoding
,
1513 __vmcs_writel(field
.encoding
, val
);
1517 vmcs_clear(shadow_vmcs
);
1518 vmcs_load(vmx
->loaded_vmcs
->vmcs
);
1521 static int copy_enlightened_to_vmcs12(struct vcpu_vmx
*vmx
)
1523 struct vmcs12
*vmcs12
= vmx
->nested
.cached_vmcs12
;
1524 struct hv_enlightened_vmcs
*evmcs
= vmx
->nested
.hv_evmcs
;
1526 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1527 vmcs12
->tpr_threshold
= evmcs
->tpr_threshold
;
1528 vmcs12
->guest_rip
= evmcs
->guest_rip
;
1530 if (unlikely(!(evmcs
->hv_clean_fields
&
1531 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC
))) {
1532 vmcs12
->guest_rsp
= evmcs
->guest_rsp
;
1533 vmcs12
->guest_rflags
= evmcs
->guest_rflags
;
1534 vmcs12
->guest_interruptibility_info
=
1535 evmcs
->guest_interruptibility_info
;
1538 if (unlikely(!(evmcs
->hv_clean_fields
&
1539 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC
))) {
1540 vmcs12
->cpu_based_vm_exec_control
=
1541 evmcs
->cpu_based_vm_exec_control
;
1544 if (unlikely(!(evmcs
->hv_clean_fields
&
1545 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN
))) {
1546 vmcs12
->exception_bitmap
= evmcs
->exception_bitmap
;
1549 if (unlikely(!(evmcs
->hv_clean_fields
&
1550 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY
))) {
1551 vmcs12
->vm_entry_controls
= evmcs
->vm_entry_controls
;
1554 if (unlikely(!(evmcs
->hv_clean_fields
&
1555 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT
))) {
1556 vmcs12
->vm_entry_intr_info_field
=
1557 evmcs
->vm_entry_intr_info_field
;
1558 vmcs12
->vm_entry_exception_error_code
=
1559 evmcs
->vm_entry_exception_error_code
;
1560 vmcs12
->vm_entry_instruction_len
=
1561 evmcs
->vm_entry_instruction_len
;
1564 if (unlikely(!(evmcs
->hv_clean_fields
&
1565 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1
))) {
1566 vmcs12
->host_ia32_pat
= evmcs
->host_ia32_pat
;
1567 vmcs12
->host_ia32_efer
= evmcs
->host_ia32_efer
;
1568 vmcs12
->host_cr0
= evmcs
->host_cr0
;
1569 vmcs12
->host_cr3
= evmcs
->host_cr3
;
1570 vmcs12
->host_cr4
= evmcs
->host_cr4
;
1571 vmcs12
->host_ia32_sysenter_esp
= evmcs
->host_ia32_sysenter_esp
;
1572 vmcs12
->host_ia32_sysenter_eip
= evmcs
->host_ia32_sysenter_eip
;
1573 vmcs12
->host_rip
= evmcs
->host_rip
;
1574 vmcs12
->host_ia32_sysenter_cs
= evmcs
->host_ia32_sysenter_cs
;
1575 vmcs12
->host_es_selector
= evmcs
->host_es_selector
;
1576 vmcs12
->host_cs_selector
= evmcs
->host_cs_selector
;
1577 vmcs12
->host_ss_selector
= evmcs
->host_ss_selector
;
1578 vmcs12
->host_ds_selector
= evmcs
->host_ds_selector
;
1579 vmcs12
->host_fs_selector
= evmcs
->host_fs_selector
;
1580 vmcs12
->host_gs_selector
= evmcs
->host_gs_selector
;
1581 vmcs12
->host_tr_selector
= evmcs
->host_tr_selector
;
1584 if (unlikely(!(evmcs
->hv_clean_fields
&
1585 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1
))) {
1586 vmcs12
->pin_based_vm_exec_control
=
1587 evmcs
->pin_based_vm_exec_control
;
1588 vmcs12
->vm_exit_controls
= evmcs
->vm_exit_controls
;
1589 vmcs12
->secondary_vm_exec_control
=
1590 evmcs
->secondary_vm_exec_control
;
1593 if (unlikely(!(evmcs
->hv_clean_fields
&
1594 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP
))) {
1595 vmcs12
->io_bitmap_a
= evmcs
->io_bitmap_a
;
1596 vmcs12
->io_bitmap_b
= evmcs
->io_bitmap_b
;
1599 if (unlikely(!(evmcs
->hv_clean_fields
&
1600 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP
))) {
1601 vmcs12
->msr_bitmap
= evmcs
->msr_bitmap
;
1604 if (unlikely(!(evmcs
->hv_clean_fields
&
1605 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2
))) {
1606 vmcs12
->guest_es_base
= evmcs
->guest_es_base
;
1607 vmcs12
->guest_cs_base
= evmcs
->guest_cs_base
;
1608 vmcs12
->guest_ss_base
= evmcs
->guest_ss_base
;
1609 vmcs12
->guest_ds_base
= evmcs
->guest_ds_base
;
1610 vmcs12
->guest_fs_base
= evmcs
->guest_fs_base
;
1611 vmcs12
->guest_gs_base
= evmcs
->guest_gs_base
;
1612 vmcs12
->guest_ldtr_base
= evmcs
->guest_ldtr_base
;
1613 vmcs12
->guest_tr_base
= evmcs
->guest_tr_base
;
1614 vmcs12
->guest_gdtr_base
= evmcs
->guest_gdtr_base
;
1615 vmcs12
->guest_idtr_base
= evmcs
->guest_idtr_base
;
1616 vmcs12
->guest_es_limit
= evmcs
->guest_es_limit
;
1617 vmcs12
->guest_cs_limit
= evmcs
->guest_cs_limit
;
1618 vmcs12
->guest_ss_limit
= evmcs
->guest_ss_limit
;
1619 vmcs12
->guest_ds_limit
= evmcs
->guest_ds_limit
;
1620 vmcs12
->guest_fs_limit
= evmcs
->guest_fs_limit
;
1621 vmcs12
->guest_gs_limit
= evmcs
->guest_gs_limit
;
1622 vmcs12
->guest_ldtr_limit
= evmcs
->guest_ldtr_limit
;
1623 vmcs12
->guest_tr_limit
= evmcs
->guest_tr_limit
;
1624 vmcs12
->guest_gdtr_limit
= evmcs
->guest_gdtr_limit
;
1625 vmcs12
->guest_idtr_limit
= evmcs
->guest_idtr_limit
;
1626 vmcs12
->guest_es_ar_bytes
= evmcs
->guest_es_ar_bytes
;
1627 vmcs12
->guest_cs_ar_bytes
= evmcs
->guest_cs_ar_bytes
;
1628 vmcs12
->guest_ss_ar_bytes
= evmcs
->guest_ss_ar_bytes
;
1629 vmcs12
->guest_ds_ar_bytes
= evmcs
->guest_ds_ar_bytes
;
1630 vmcs12
->guest_fs_ar_bytes
= evmcs
->guest_fs_ar_bytes
;
1631 vmcs12
->guest_gs_ar_bytes
= evmcs
->guest_gs_ar_bytes
;
1632 vmcs12
->guest_ldtr_ar_bytes
= evmcs
->guest_ldtr_ar_bytes
;
1633 vmcs12
->guest_tr_ar_bytes
= evmcs
->guest_tr_ar_bytes
;
1634 vmcs12
->guest_es_selector
= evmcs
->guest_es_selector
;
1635 vmcs12
->guest_cs_selector
= evmcs
->guest_cs_selector
;
1636 vmcs12
->guest_ss_selector
= evmcs
->guest_ss_selector
;
1637 vmcs12
->guest_ds_selector
= evmcs
->guest_ds_selector
;
1638 vmcs12
->guest_fs_selector
= evmcs
->guest_fs_selector
;
1639 vmcs12
->guest_gs_selector
= evmcs
->guest_gs_selector
;
1640 vmcs12
->guest_ldtr_selector
= evmcs
->guest_ldtr_selector
;
1641 vmcs12
->guest_tr_selector
= evmcs
->guest_tr_selector
;
1644 if (unlikely(!(evmcs
->hv_clean_fields
&
1645 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2
))) {
1646 vmcs12
->tsc_offset
= evmcs
->tsc_offset
;
1647 vmcs12
->virtual_apic_page_addr
= evmcs
->virtual_apic_page_addr
;
1648 vmcs12
->xss_exit_bitmap
= evmcs
->xss_exit_bitmap
;
1651 if (unlikely(!(evmcs
->hv_clean_fields
&
1652 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR
))) {
1653 vmcs12
->cr0_guest_host_mask
= evmcs
->cr0_guest_host_mask
;
1654 vmcs12
->cr4_guest_host_mask
= evmcs
->cr4_guest_host_mask
;
1655 vmcs12
->cr0_read_shadow
= evmcs
->cr0_read_shadow
;
1656 vmcs12
->cr4_read_shadow
= evmcs
->cr4_read_shadow
;
1657 vmcs12
->guest_cr0
= evmcs
->guest_cr0
;
1658 vmcs12
->guest_cr3
= evmcs
->guest_cr3
;
1659 vmcs12
->guest_cr4
= evmcs
->guest_cr4
;
1660 vmcs12
->guest_dr7
= evmcs
->guest_dr7
;
1663 if (unlikely(!(evmcs
->hv_clean_fields
&
1664 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER
))) {
1665 vmcs12
->host_fs_base
= evmcs
->host_fs_base
;
1666 vmcs12
->host_gs_base
= evmcs
->host_gs_base
;
1667 vmcs12
->host_tr_base
= evmcs
->host_tr_base
;
1668 vmcs12
->host_gdtr_base
= evmcs
->host_gdtr_base
;
1669 vmcs12
->host_idtr_base
= evmcs
->host_idtr_base
;
1670 vmcs12
->host_rsp
= evmcs
->host_rsp
;
1673 if (unlikely(!(evmcs
->hv_clean_fields
&
1674 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT
))) {
1675 vmcs12
->ept_pointer
= evmcs
->ept_pointer
;
1676 vmcs12
->virtual_processor_id
= evmcs
->virtual_processor_id
;
1679 if (unlikely(!(evmcs
->hv_clean_fields
&
1680 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1
))) {
1681 vmcs12
->vmcs_link_pointer
= evmcs
->vmcs_link_pointer
;
1682 vmcs12
->guest_ia32_debugctl
= evmcs
->guest_ia32_debugctl
;
1683 vmcs12
->guest_ia32_pat
= evmcs
->guest_ia32_pat
;
1684 vmcs12
->guest_ia32_efer
= evmcs
->guest_ia32_efer
;
1685 vmcs12
->guest_pdptr0
= evmcs
->guest_pdptr0
;
1686 vmcs12
->guest_pdptr1
= evmcs
->guest_pdptr1
;
1687 vmcs12
->guest_pdptr2
= evmcs
->guest_pdptr2
;
1688 vmcs12
->guest_pdptr3
= evmcs
->guest_pdptr3
;
1689 vmcs12
->guest_pending_dbg_exceptions
=
1690 evmcs
->guest_pending_dbg_exceptions
;
1691 vmcs12
->guest_sysenter_esp
= evmcs
->guest_sysenter_esp
;
1692 vmcs12
->guest_sysenter_eip
= evmcs
->guest_sysenter_eip
;
1693 vmcs12
->guest_bndcfgs
= evmcs
->guest_bndcfgs
;
1694 vmcs12
->guest_activity_state
= evmcs
->guest_activity_state
;
1695 vmcs12
->guest_sysenter_cs
= evmcs
->guest_sysenter_cs
;
1700 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1701 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1702 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1703 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
1704 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
1705 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
1706 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
1707 * vmcs12->page_fault_error_code_mask =
1708 * evmcs->page_fault_error_code_mask;
1709 * vmcs12->page_fault_error_code_match =
1710 * evmcs->page_fault_error_code_match;
1711 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1712 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1713 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1714 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1719 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1720 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1721 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1722 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1723 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1724 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1725 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1726 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1727 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1728 * vmcs12->exit_qualification = evmcs->exit_qualification;
1729 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1731 * Not present in struct vmcs12:
1732 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1733 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1734 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1735 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1741 static int copy_vmcs12_to_enlightened(struct vcpu_vmx
*vmx
)
1743 struct vmcs12
*vmcs12
= vmx
->nested
.cached_vmcs12
;
1744 struct hv_enlightened_vmcs
*evmcs
= vmx
->nested
.hv_evmcs
;
1747 * Should not be changed by KVM:
1749 * evmcs->host_es_selector = vmcs12->host_es_selector;
1750 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1751 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1752 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1753 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1754 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1755 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1756 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1757 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1758 * evmcs->host_cr0 = vmcs12->host_cr0;
1759 * evmcs->host_cr3 = vmcs12->host_cr3;
1760 * evmcs->host_cr4 = vmcs12->host_cr4;
1761 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1762 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1763 * evmcs->host_rip = vmcs12->host_rip;
1764 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1765 * evmcs->host_fs_base = vmcs12->host_fs_base;
1766 * evmcs->host_gs_base = vmcs12->host_gs_base;
1767 * evmcs->host_tr_base = vmcs12->host_tr_base;
1768 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1769 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1770 * evmcs->host_rsp = vmcs12->host_rsp;
1771 * sync_vmcs02_to_vmcs12() doesn't read these:
1772 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1773 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1774 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1775 * evmcs->ept_pointer = vmcs12->ept_pointer;
1776 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1777 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1778 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1779 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1780 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
1781 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
1782 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
1783 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
1784 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1785 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1786 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1787 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1788 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1789 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1790 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1791 * evmcs->page_fault_error_code_mask =
1792 * vmcs12->page_fault_error_code_mask;
1793 * evmcs->page_fault_error_code_match =
1794 * vmcs12->page_fault_error_code_match;
1795 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1796 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1797 * evmcs->tsc_offset = vmcs12->tsc_offset;
1798 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1799 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1800 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1801 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1802 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1803 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1804 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1805 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1807 * Not present in struct vmcs12:
1808 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1809 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1810 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1811 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1814 evmcs
->guest_es_selector
= vmcs12
->guest_es_selector
;
1815 evmcs
->guest_cs_selector
= vmcs12
->guest_cs_selector
;
1816 evmcs
->guest_ss_selector
= vmcs12
->guest_ss_selector
;
1817 evmcs
->guest_ds_selector
= vmcs12
->guest_ds_selector
;
1818 evmcs
->guest_fs_selector
= vmcs12
->guest_fs_selector
;
1819 evmcs
->guest_gs_selector
= vmcs12
->guest_gs_selector
;
1820 evmcs
->guest_ldtr_selector
= vmcs12
->guest_ldtr_selector
;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
	evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
	evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
	evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
	evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
	evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

	evmcs->exit_qualification = vmcs12->exit_qualification;

	evmcs->guest_linear_address = vmcs12->guest_linear_address;
	evmcs->guest_rsp = vmcs12->guest_rsp;
	evmcs->guest_rflags = vmcs12->guest_rflags;

	evmcs->guest_interruptibility_info =
		vmcs12->guest_interruptibility_info;
	evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
	evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
	evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
	evmcs->vm_entry_exception_error_code =
		vmcs12->vm_entry_exception_error_code;
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

	evmcs->guest_rip = vmcs12->guest_rip;

	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;

	return;
}
/*
 * This is an equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
	struct kvm_vcpu *vcpu, bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool evmcs_gpa_changed = false;
	u64 evmcs_gpa;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return EVMPTRLD_DISABLED;

	if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
		return EVMPTRLD_DISABLED;

	if (unlikely(!vmx->nested.hv_evmcs ||
		     evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
				 &vmx->nested.hv_evmcs_map))
			return EVMPTRLD_ERROR;

		vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect guest to set this
		 * value to first u32 field of eVMCS which should specify eVMCS
		 * VersionNumber.
		 *
		 * Guest should be aware of supported eVMCS versions by host by
		 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
		 * expected to set this CPUID leaf according to the value
		 * returned in vmcs_version from nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * to their own invented interface: When Hyper-V use eVMCS, it
		 * just sets first u32 field of eVMCS to revision_id specified
		 * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number
		 * which is one of the supported versions specified in
		 * CPUID.0x4000000A.EAX[0:15].
		 *
		 * To overcome Hyper-V bug, we accept here either a supported
		 * eVMCS version or VMCS12 revision_id as valid values for first
		 * u32 field of eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return EVMPTRLD_VMFAIL;
		}

		vmx->nested.dirty_vmcs12 = true;
		vmx->nested.hv_evmcs_vmptr = evmcs_gpa;

		evmcs_gpa_changed = true;
		/*
		 * Unlike normal vmcs12, enlightened vmcs12 is not fully
		 * reloaded from guest's memory (read only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}
	}

	/*
	 * Clean fields data can't be used on VMLAUNCH and when we switch
	 * between different L2 guests as KVM keeps a single VMCS12 per L1.
	 */
	if (from_launch || evmcs_gpa_changed)
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;

	return EVMPTRLD_SUCCEEDED;
}
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.hv_evmcs) {
		copy_vmcs12_to_enlightened(vmx);
		/* All fields are clean */
		vmx->nested.hv_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
	} else {
		copy_vmcs12_to_shadow(vmx);
	}

	vmx->nested.need_vmcs12_to_shadow_sync = false;
}
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
{
	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
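
	/*
	 * Sketch of the conversion above (explanatory note, not part of the
	 * original source): the vmcs12 value is in preemption-timer ticks,
	 * each tick covering 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE = 32
	 * guest TSC cycles, so
	 *
	 *   ns = (ticks << 5) * 1000000 / virtual_tsc_khz
	 *
	 * e.g. ticks = 1000 with virtual_tsc_khz = 2000000 (a 2 GHz guest
	 * TSC) gives 32000 * 1000000 / 2000000 = 16000 ns.
	 */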
	hrtimer_start(&vmx->nested.preemption_timer,
		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
	else
		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
}
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
{
	/*
	 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
	 * according to L0's settings (vmcs12 is irrelevant here).  Host
	 * fields that come from L0 and are not constant, e.g. HOST_CR3,
	 * will be set as needed prior to VMLAUNCH/VMRESUME.
	 */
	if (vmx->nested.vmcs02_initialized)
		return;
	vmx->nested.vmcs02_initialized = true;

	/*
	 * We don't care what the EPTP value is we just need to guarantee
	 * it's valid so we don't get a false positive when doing early
	 * consistency checks.
	 */
	if (enable_ept && nested_early_check)
		vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));

	/* All VMFUNCs are currently emulated through L0 vmexits.  */
	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	if (cpu_has_vmx_posted_intr())
		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));

	/*
	 * The PML address never changes, so it is constant in vmcs02.
	 * Conceptually we want to copy the PML index from vmcs01 here,
	 * and then back to vmcs01 on nested vmexit.  But since we flush
	 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
	 * index is also effectively constant in vmcs02.
	 */
	if (enable_pml) {
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}

	if (cpu_has_vmx_encls_vmexit())
		vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);

	/*
	 * Set the MSR load/store lists to match L0's settings.  Only the
	 * addresses are constant (for vmcs02), the counts can change based
	 * on L2's behavior, e.g. switching to/from long mode.
	 */
	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	vmx_set_constant_host_state(vmx);
}
static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
				      struct vmcs12 *vmcs12)
{
	prepare_vmcs02_constant_state(vmx);

	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	if (enable_vpid) {
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
		else
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
	}
}
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	u32 exec_control, vmcs12_exec_ctrl;
	u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);

	if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
		prepare_vmcs02_early_rare(vmx, vmcs12);

	/*
	 * PIN CONTROLS
	 */
	exec_control = vmx_pin_based_exec_ctrl(vmx);
	exec_control |= (vmcs12->pin_based_vm_exec_control &
			 ~PIN_BASED_VMX_PREEMPTION_TIMER);

	/* Posted interrupts setting is only taken from vmcs12.  */
	if (nested_cpu_has_posted_intr(vmcs12)) {
		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
		vmx->nested.pi_pending = false;
	} else {
		exec_control &= ~PIN_BASED_POSTED_INTR;
	}
	pin_controls_set(vmx, exec_control);

	/*
	 * EXEC CONTROLS
	 */
	exec_control = vmx_exec_control(vmx); /* L0's desires */
	exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
	exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
	exec_control &= ~CPU_BASED_TPR_SHADOW;
	exec_control |= vmcs12->cpu_based_vm_exec_control;

	vmx->nested.l1_tpr_threshold = -1;
	if (exec_control & CPU_BASED_TPR_SHADOW)
		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
#ifdef CONFIG_X86_64
	else
		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
				CPU_BASED_CR8_STORE_EXITING;
#endif

	/*
	 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
	 * for I/O port accesses.
	 */
	exec_control |= CPU_BASED_UNCOND_IO_EXITING;
	exec_control &= ~CPU_BASED_USE_IO_BITMAPS;

	/*
	 * This bit will be computed in nested_get_vmcs12_pages, because
	 * we do not have access to L1's MSR bitmap yet.  For now, keep
	 * the same bit as before, hoping to avoid multiple VMWRITEs that
	 * only set/clear this bit.
	 */
	exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
	exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;

	exec_controls_set(vmx, exec_control);

	/*
	 * SECONDARY EXEC CONTROLS
	 */
	if (cpu_has_secondary_exec_ctrls()) {
		exec_control = vmx->secondary_exec_control;

		/* Take the following fields only from vmcs12 */
		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
				  SECONDARY_EXEC_ENABLE_INVPCID |
				  SECONDARY_EXEC_RDTSCP |
				  SECONDARY_EXEC_XSAVES |
				  SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
				  SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_ENABLE_VMFUNC);
		if (nested_cpu_has(vmcs12,
				   CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
			vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
				~SECONDARY_EXEC_ENABLE_PML;
			exec_control |= vmcs12_exec_ctrl;
		}

		/* VMCS shadowing for L2 is emulated for now */
		exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

		/*
		 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
		 * will not have to rewrite the controls just for this bit.
		 */
		if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
		    (vmcs12->guest_cr4 & X86_CR4_UMIP))
			exec_control |= SECONDARY_EXEC_DESC;

		if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
			vmcs_write16(GUEST_INTR_STATUS,
				vmcs12->guest_intr_status);

		secondary_exec_controls_set(vmx, exec_control);
	}

	/*
	 * ENTRY CONTROLS
	 *
	 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
	 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
	 * on the related bits (if supported by the CPU) in the hope that
	 * we can avoid VMWrites during vmx_set_efer().
	 */
	exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
			~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
	if (cpu_has_load_ia32_efer()) {
		if (guest_efer & EFER_LMA)
			exec_control |= VM_ENTRY_IA32E_MODE;
		if (guest_efer != host_efer)
			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
	}
	vm_entry_controls_set(vmx, exec_control);

	/*
	 * EXIT CONTROLS
	 *
	 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
	 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
	 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
	 */
	exec_control = vmx_vmexit_ctrl();
	if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
		exec_control |= VM_EXIT_LOAD_IA32_EFER;
	vm_exit_controls_set(vmx, exec_control);

	/*
	 * Interrupt/Exception Fields
	 */
	if (vmx->nested.nested_run_pending) {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			     vmcs12->vm_entry_intr_info_field);
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
			     vmcs12->vm_entry_exception_error_code);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmcs12->vm_entry_instruction_len);
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
			     vmcs12->guest_interruptibility_info);
		vmx->loaded_vmcs->nmi_known_unmasked =
			!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
	} else {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
	}
}
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
		vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
		vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
		vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
		vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
		vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
		vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
		vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
		vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
		vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
		vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
		vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
		vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
		vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
		vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
		vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
		vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
		vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
		vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
		vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
		vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
		vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
		vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
		vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
		vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
		vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
		vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
		vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
		vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
		vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
		vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
		vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
		vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
		vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
		vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
		vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
		vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
	}

	if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
			   HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
		vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vmcs12->guest_pending_dbg_exceptions);
		vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
		vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);

		/*
		 * L1 may access the L2's PDPTR, so save them to construct
		 * vmcs12
		 */
		if (enable_ept) {
			vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
			vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
			vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
			vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		}

		if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
	}

	if (nested_cpu_has_xsaves(vmcs12))
		vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);

	/*
	 * Whether page-faults are trapped is determined by a combination of
	 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
	 * If enable_ept, L0 doesn't care about page faults and we should
	 * set all of these to L1's desires. However, if !enable_ept, L0 does
	 * care about (at least some) page faults, and because it is not easy
	 * (if at all possible?) to merge L0 and L1's desires, we simply ask
	 * to exit on each and every L2 page fault. This is done by setting
	 * MASK=MATCH=0 and (see below) EB.PF=1.
	 * Note that below we don't need special code to set EB.PF beyond the
	 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
	 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
	 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
	 */
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
		enable_ept ? vmcs12->page_fault_error_code_mask : 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
		enable_ept ? vmcs12->page_fault_error_code_match : 0);

	if (cpu_has_vmx_apicv()) {
		vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
		vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
		vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
		vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
	}

	/*
	 * Make sure the msr_autostore list is up to date before we set the
	 * count in the vmcs02.
	 */
	prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	set_cr4_guest_host_mask(vmx);
}
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that will both be appropriate to L1's requests, and our
 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has additional necessary side-effects, like setting various
 * vcpu->arch fields.
 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
 * is assigned to entry_failure_code on failure.
 */
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			  u32 *entry_failure_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
	bool load_guest_pdptrs_vmcs12 = false;

	if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
		prepare_vmcs02_rare(vmx, vmcs12);
		vmx->nested.dirty_vmcs12 = false;

		load_guest_pdptrs_vmcs12 = !hv_evmcs ||
			!(hv_evmcs->hv_clean_fields &
			  HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
	}

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
		kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
	if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
		vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
	vmx_set_rflags(vcpu, vmcs12->guest_rflags);

	/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
	update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
		vcpu->arch.pat = vmcs12->guest_ia32_pat;
	} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
	}

	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (enable_vpid) {
		/*
		 * There is no direct mapping between vpid02 and vpid12, the
		 * vpid02 is per-vCPU for L0 and reused while the value of
		 * vpid12 is changed w/ one invvpid during nested vmentry.
		 * The vpid12 is allocated by L1 for L2, so it will not
		 * influence global bitmap(for vpid01 and vpid02 allocation)
		 * even if spawn a lot of nested vCPUs.
		 */
		if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
			if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
				vmx->nested.last_vpid = vmcs12->virtual_processor_id;
				__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
			}
		} else {
			/*
			 * If L1 use EPT, then L0 needs to execute INVEPT on
			 * EPTP02 instead of EPTP01. Therefore, delay TLB
			 * flush until vmcs02->eptp is fully updated by
			 * KVM_REQ_LOAD_MMU_PGD. Note that this assumes
			 * KVM_REQ_TLB_FLUSH is evaluated after
			 * KVM_REQ_LOAD_MMU_PGD in vcpu_enter_guest().
			 */
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}
	}

	if (nested_cpu_has_ept(vmcs12))
		nested_ept_init_mmu_context(vcpu);

	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits which we consider mandatory enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; It's not enough to take
	 * vmcs12->cr0_read_shadow because on our cr0_guest_host_mask we we
	 * have more bits than L1 expected.
	 */
	vmx_set_cr0(vcpu, vmcs12->guest_cr0);
	vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));

	vmx_set_cr4(vcpu, vmcs12->guest_cr4);
	vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));

	vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
	/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
	vmx_set_efer(vcpu, vcpu->arch.efer);

	/*
	 * Guest state is invalid and unrestricted guest is disabled,
	 * which means L1 attempted VMEntry to L2 with invalid state.
	 * Fail the VMEntry.
	 */
	if (vmx->emulation_required) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return -EINVAL;
	}

	/* Shadow page tables on either EPT or shadow page tables. */
	if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				entry_failure_code))
		return -EINVAL;

	/*
	 * Immediately write vmcs02.GUEST_CR3.  It will be propagated to vmcs12
	 * on nested VM-Exit, which can occur without actually running L2 and
	 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
	 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
	 * transition to HLT instead of running L2.
	 */
	if (enable_ept)
		vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);

	/* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
	if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
	    is_pae_paging(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
		vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
		vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
		vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
	}

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
				     vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	kvm_rsp_write(vcpu, vmcs12->guest_rsp);
	kvm_rip_write(vcpu, vmcs12->guest_rip);
	return 0;
}
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
{
	if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
	       nested_cpu_has_virtual_nmis(vmcs12)))
		return -EINVAL;

	if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
	       nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
		return -EINVAL;

	return 0;
}
static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int maxphyaddr = cpuid_maxphyaddr(vcpu);

	/* Check for memory type validity */
	switch (new_eptp & VMX_EPTP_MT_MASK) {
	case VMX_EPTP_MT_UC:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
			return false;
		break;
	case VMX_EPTP_MT_WB:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Page-walk levels validity. */
	switch (new_eptp & VMX_EPTP_PWL_MASK) {
	case VMX_EPTP_PWL_5:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
			return false;
		break;
	case VMX_EPTP_PWL_4:
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
			return false;
		break;
	default:
		return false;
	}

	/* Reserved bits should not be set */
	if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
		return false;

	/* AD, if set, should be supported */
	if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
		if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
			return false;
	}

	return true;
}
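
/*
 * For reference (explanatory note, not part of the original file): in the
 * EPTP format, bits 2:0 select the memory type, bits 5:3 hold the page-walk
 * length minus one, bit 6 enables accessed/dirty flags, and bits 11:7 are
 * reserved, which is what the "(new_eptp >> 7) & 0x1f" check above enforces.
 */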
/*
 * Checks related to VM-Execution Control Fields
 */
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
				   vmx->nested.msrs.pinbased_ctls_low,
				   vmx->nested.msrs.pinbased_ctls_high)) ||
	    CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
				   vmx->nested.msrs.procbased_ctls_low,
				   vmx->nested.msrs.procbased_ctls_high)))
		return -EINVAL;

	if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
	    CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
				   vmx->nested.msrs.secondary_ctls_low,
				   vmx->nested.msrs.secondary_ctls_high)))
		return -EINVAL;

	if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
	    nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
	    nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
	    nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
	    nested_vmx_check_nmi_controls(vmcs12) ||
	    nested_vmx_check_pml_controls(vcpu, vmcs12) ||
	    nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
	    nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
	    nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
	    CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;

	if (!nested_cpu_has_preemption_timer(vmcs12) &&
	    nested_cpu_has_save_preemption_timer(vmcs12))
		return -EINVAL;

	if (nested_cpu_has_ept(vmcs12) &&
	    CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
		return -EINVAL;

	if (nested_cpu_has_vmfunc(vmcs12)) {
		if (CC(vmcs12->vm_function_control &
		       ~vmx->nested.msrs.vmfunc_controls))
			return -EINVAL;

		if (nested_cpu_has_eptp_switching(vmcs12)) {
			if (CC(!nested_cpu_has_ept(vmcs12)) ||
			    CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
				return -EINVAL;
		}
	}

	return 0;
}
/*
 * Checks related to VM-Exit Control Fields
 */
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
				    vmx->nested.msrs.exit_ctls_low,
				    vmx->nested.msrs.exit_ctls_high)) ||
	    CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
		return -EINVAL;

	return 0;
}
/*
 * Checks related to VM-Entry Control Fields
 */
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
				    vmx->nested.msrs.entry_ctls_low,
				    vmx->nested.msrs.entry_ctls_high)))
		return -EINVAL;

	/*
	 * From the Intel SDM, volume 3:
	 * Fields relevant to VM-entry event injection must be set properly.
	 * These fields are the VM-entry interruption-information field, the
	 * VM-entry exception error code, and the VM-entry instruction length.
	 */
	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
		u32 intr_info = vmcs12->vm_entry_intr_info_field;
		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
		bool should_have_error_code;
		bool urg = nested_cpu_has2(vmcs12,
					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;

		/* VM-entry interruption-info field: interruption type */
		if (CC(intr_type == INTR_TYPE_RESERVED) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT &&
		       !nested_cpu_supports_monitor_trap_flag(vcpu)))
			return -EINVAL;

		/* VM-entry interruption-info field: vector */
		if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
		    CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
		    CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
			return -EINVAL;

		/* VM-entry interruption-info field: deliver error code */
		should_have_error_code =
			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
			x86_exception_has_error_code(vector);
		if (CC(has_error_code != should_have_error_code))
			return -EINVAL;

		/* VM-entry exception error code */
		if (CC(has_error_code &&
		       vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
			return -EINVAL;

		/* VM-entry interruption-info field: reserved bits */
		if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
			return -EINVAL;

		/* VM-entry instruction length */
		switch (intr_type) {
		case INTR_TYPE_SOFT_EXCEPTION:
		case INTR_TYPE_SOFT_INTR:
		case INTR_TYPE_PRIV_SW_EXCEPTION:
			if (CC(vmcs12->vm_entry_instruction_len > 15) ||
			    CC(vmcs12->vm_entry_instruction_len == 0 &&
			       CC(!nested_cpu_has_zero_length_injection(vcpu))))
				return -EINVAL;
		}
	}

	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
	    nested_check_vm_exit_controls(vcpu, vmcs12) ||
	    nested_check_vm_entry_controls(vcpu, vmcs12))
		return -EINVAL;

	if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
		return nested_evmcs_check_controls(vmcs12);

	return 0;
}
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	bool ia32e;

	if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
	    CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
	    CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
		return -EINVAL;

	if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->host_ia32_perf_global_ctrl)))
		return -EINVAL;

#ifdef CONFIG_X86_64
	ia32e = !!(vcpu->arch.efer & EFER_LMA);
#else
	ia32e = false;
#endif

	if (ia32e) {
		if (CC(!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)) ||
		    CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
			return -EINVAL;
	} else {
		if (CC(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) ||
		    CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
		    CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
		    CC((vmcs12->host_rip) >> 32))
			return -EINVAL;
	}

	if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
	    CC(vmcs12->host_cs_selector == 0) ||
	    CC(vmcs12->host_tr_selector == 0) ||
	    CC(vmcs12->host_ss_selector == 0 && !ia32e))
		return -EINVAL;

	if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
	    CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
	 * the values of the LMA and LME bits in the field must each be that of
	 * the host address-space size VM-exit control.
	 */
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
		if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
		    CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
			return -EINVAL;
	}

	return 0;
}
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	int r = 0;
	struct vmcs12 *shadow;
	struct kvm_host_map map;

	if (vmcs12->vmcs_link_pointer == -1ull)
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
		return -EINVAL;

	if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
		return -EINVAL;

	shadow = map.hva;

	if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
	    CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
		r = -EINVAL;

	kvm_vcpu_unmap(vcpu, &map, false);
	return r;
}
/*
 * Checks related to Guest Non-register State
 */
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
{
	if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
	       vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12,
					u32 *exit_qual)
{
	bool ia32e;

	*exit_qual = ENTRY_FAIL_DEFAULT;

	if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
	    CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
	    CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
		return -EINVAL;

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
	    CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
		return -EINVAL;

	if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
		*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
		return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
	    CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
					   vmcs12->guest_ia32_perf_global_ctrl)))
		return -EINVAL;

	/*
	 * If the load IA32_EFER VM-entry control is 1, the following checks
	 * are performed on the field for the IA32_EFER MSR:
	 * - Bits reserved in the IA32_EFER MSR must be 0.
	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
	 *   the IA-32e mode guest VM-exit control. It must also be identical
	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 *   CR0.PG) is 1.
	 */
	if (to_vmx(vcpu)->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
		if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
		    CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
		    CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
			return -EINVAL;
	}

	if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
	    (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
	     CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
		return -EINVAL;

	if (nested_check_guest_non_reg_state(vmcs12))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long cr3, cr4;
	bool vm_fail;

	if (!nested_early_check)
		return 0;

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);

	preempt_disable();

	vmx_prepare_switch_to_guest(vcpu);

	/*
	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
	 * which is reserved to '1' by hardware.  GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
	 * there is no need to preserve other bits or save/restore the field.
	 */
	vmcs_writel(GUEST_RFLAGS, 0);

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
		vmcs_writel(HOST_CR3, cr3);
		vmx->loaded_vmcs->host_state.cr3 = cr3;
	}

	cr4 = cr4_read_shadow();
	if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
		vmcs_writel(HOST_CR4, cr4);
		vmx->loaded_vmcs->host_state.cr4 = cr4;
	}

	asm(
		"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"je 1f \n\t"
		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"1: \n\t"
		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */

		/* Check if vmlaunch or vmresume is needed */
		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"

		/*
		 * VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
		 * RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
		 * Valid.  vmx_vmenter() directly "returns" RFLAGS, and so the
		 * results of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
		 */
		"call vmx_vmenter\n\t"

		CC_SET(be)
	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
	      : [HOST_RSP]"r"((unsigned long)HOST_RSP),
		[loaded_vmcs]"r"(vmx->loaded_vmcs),
		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
		[wordsize]"i"(sizeof(ulong))
	      : "memory"
	);

	if (vmx->msr_autoload.host.nr)
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	if (vmx->msr_autoload.guest.nr)
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);

	if (vm_fail) {
		u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);

		preempt_enable();

		trace_kvm_nested_vmenter_failed(
			"early hardware check VM-instruction error: ", error);
		WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		return 1;
	}

	/*
	 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
	 */
	local_irq_enable();
	if (hw_breakpoint_active())
		set_debugreg(__this_cpu_read(cpu_dr7), 7);
	preempt_enable();

	/*
	 * A non-failing VMEntry means we somehow entered guest mode with
	 * an illegal RIP, and that's just the tip of the iceberg.  There
	 * is no telling what memory has been modified or what state has
	 * been exposed to unknown code.  Hitting this all but guarantees
	 * a (very critical) hardware issue.
	 */
	WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
		VMX_EXIT_REASONS_FAILED_VMENTRY));

	return 0;
}
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct kvm_host_map *map;
	struct page *page;
	u64 hpa;

	/*
	 * hv_evmcs may end up being not mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
		enum nested_evmptrld_status evmptrld_status =
			nested_vmx_handle_enlightened_vmptrld(vcpu, false);

		if (evmptrld_status == EVMPTRLD_VMFAIL ||
		    evmptrld_status == EVMPTRLD_ERROR) {
			pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
					     __func__);
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
				KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return false;
		}
	}

	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
		/*
		 * Translate L1 physical address to host physical
		 * address for vmcs02. Keep the page pinned, so this
		 * physical address remains valid. We keep a reference
		 * to it so we can release it later.
		 */
		if (vmx->nested.apic_access_page) { /* shouldn't happen */
			kvm_release_page_clean(vmx->nested.apic_access_page);
			vmx->nested.apic_access_page = NULL;
		}
		page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
		if (!is_error_page(page)) {
			vmx->nested.apic_access_page = page;
			hpa = page_to_phys(vmx->nested.apic_access_page);
			vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
					     __func__);
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			vcpu->run->internal.suberror =
				KVM_INTERNAL_ERROR_EMULATION;
			vcpu->run->internal.ndata = 0;
			return false;
		}
	}

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		map = &vmx->nested.virtual_apic_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
		} else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
			   nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
			   !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
			/*
			 * The processor will never use the TPR shadow, simply
			 * clear the bit from the execution control.  Such a
			 * configuration is useless, but it happens in tests.
			 * For any other configuration, failing the vm entry is
			 * _not_ what the processor does but it's basically the
			 * only possibility we have.
			 */
			exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
		} else {
			/*
			 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
			 * force VM-Entry to fail.
			 */
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
		}
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		map = &vmx->nested.pi_desc_map;

		if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
			vmx->nested.pi_desc =
				(struct pi_desc *)(((void *)map->hva) +
				offset_in_page(vmcs12->posted_intr_desc_addr));
			vmcs_write64(POSTED_INTR_DESC_ADDR,
				     pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
		}
	}
	if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
		exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
	else
		exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);

	return true;
}
/*
 * Intel's VMX Instruction Reference specifies a common set of prerequisites
 * for running VMX instructions (except VMXON, whose prerequisites are
 * slightly different). It also specifies what exception to inject otherwise.
 * Note that many of these exceptions have priority over VM exits, so they
 * don't have to be checked again here.
 */
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
	if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}
static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
{
	u8 rvi = vmx_get_rvi();
	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}
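
/*
 * Note (explanatory comment, not part of the original source): the comparison
 * above works on interrupt priority classes.  RVI[7:4] is the priority class
 * of the highest-priority pending virtual interrupt and VPPR[7:4] is the
 * current processor-priority class, so a non-zero result means a pending
 * virtual interrupt could actually be delivered to the vCPU.
 */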
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12);
/*
 * If from_vmentry is false, this is being called from state restore (either RSM
 * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
 *
 * Returns:
 *	NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
 *	NVMX_VMENTRY_VMFAIL:  Consistency check VMFail
 *	NVMX_VMENTRY_VMEXIT:  Consistency check VMExit
 *	NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
 */
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	bool evaluate_pending_interrupts;
	u32 exit_reason = EXIT_REASON_INVALID_STATE;
	u32 exit_qual;

	evaluate_pending_interrupts = exec_controls_get(vmx) &
		(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (kvm_mpx_supported() &&
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	/*
	 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
	 * nested early checks are disabled.  In the event of a "late" VM-Fail,
	 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
	 * software model to the pre-VMEntry host state.  When EPT is disabled,
	 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
	 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3.  Stuffing
	 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
	 * the correct value.  Smashing vmcs01.GUEST_CR3 is safe because nested
	 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
	 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
	 * L1.  Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
	 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
	 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
	 * path would need to manually save/restore vmcs01.GUEST_CR3.
	 */
	if (!enable_ept && !nested_early_check)
		vmcs_writel(GUEST_CR3, vcpu->arch.cr3);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);

	prepare_vmcs02_early(vmx, vmcs12);

	if (from_vmentry) {
		if (unlikely(!nested_get_vmcs12_pages(vcpu)))
			return NVMX_VMENTRY_KVM_INTERNAL_ERROR;

		if (nested_vmx_check_vmentry_hw(vcpu)) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return NVMX_VMENTRY_VMFAIL;
		}

		if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
			goto vmentry_fail_vmexit;
	}

	enter_guest_mode(vcpu);
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
		goto vmentry_fail_vmexit_guest_mode;

	if (from_vmentry) {
		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
		exit_qual = nested_vmx_load_msr(vcpu,
						vmcs12->vm_entry_msr_load_addr,
						vmcs12->vm_entry_msr_load_count);
		if (exit_qual)
			goto vmentry_fail_vmexit_guest_mode;
	} else {
		/*
		 * The MMU is not initialized to point at the right entities yet and
		 * "get pages" would need to read data from the guest (i.e. we will
		 * need to perform gpa to hpa translation). Request a call
		 * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
		 * have already been set at vmentry time and should not be reset.
		 */
		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
	}

	/*
	 * If L1 had a pending IRQ/NMI until it executed
	 * VMLAUNCH/VMRESUME which wasn't delivered because it was
	 * disallowed (e.g. interrupts disabled), L0 needs to
	 * evaluate if this pending event should cause an exit from L2
	 * to L1 or delivered directly to L2 (e.g. In case L1 don't
	 * intercept EXTERNAL_INTERRUPT).
	 *
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request, or checking RVI during evaluation of
	 * pending virtual interrupts.  However, this setting was done
	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	/*
	 * Do not start the preemption timer hrtimer until after we know
	 * we are successful, so that only nested_vmx_vmexit needs to cancel
	 * the timer.
	 */
	vmx->nested.preemption_timer_expired = false;
	if (nested_cpu_has_preemption_timer(vmcs12))
		vmx_start_preemption_timer(vcpu);

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
	 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
	 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return NVMX_VMENTRY_SUCCESS;

	/*
	 * A failed consistency check that leads to a VMExit during L1's
	 * VMEnter to L2 is a variation of a normal VMexit, as explained in
	 * 26.7 "VM-entry failures during or after loading guest state".
	 */
vmentry_fail_vmexit_guest_mode:
	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
	leave_guest_mode(vcpu);

vmentry_fail_vmexit:
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!from_vmentry)
		return NVMX_VMENTRY_VMEXIT;

	load_vmcs12_host_state(vcpu, vmcs12);
	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
	vmcs12->exit_qualification = exit_qual;
	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	return NVMX_VMENTRY_VMEXIT;
}
/*
 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
 * for running an L2 nested guest.
 */
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
	struct vmcs12 *vmcs12;
	enum nvmx_vmentry_status status;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
	enum nested_evmptrld_status evmptrld_status;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
	if (evmptrld_status == EVMPTRLD_ERROR) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	} else if (evmptrld_status == EVMPTRLD_VMFAIL) {
		return nested_vmx_failInvalid(vcpu);
	}

	if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
		return nested_vmx_failInvalid(vcpu);

	vmcs12 = get_vmcs12(vcpu);

	/*
	 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
	 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
	 * rather than RFLAGS.ZF, and no error number is stored to the
	 * VM-instruction error field.
	 */
	if (vmcs12->hdr.shadow_vmcs)
		return nested_vmx_failInvalid(vcpu);

	if (vmx->nested.hv_evmcs) {
		copy_enlightened_to_vmcs12(vmx);
		/* Enlightened VMCS doesn't have launch state */
		vmcs12->launch_state = !launch;
	} else if (enable_shadow_vmcs) {
		copy_shadow_to_vmcs12(vmx);
	}

	/*
	 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and act appropriately when
	 * they fail: As the SDM explains, some conditions should cause the
	 * instruction to fail, while others will cause the instruction to seem
	 * to succeed, but return an EXIT_REASON_INVALID_STATE.
	 * To speed up the normal (success) code path, we should avoid checking
	 * for misconfigurations which will anyway be caught by the processor
	 * when using the merged vmcs02.
	 */
	if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
		return nested_vmx_failValid(vcpu,
			VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);

	if (vmcs12->launch_state == launch)
		return nested_vmx_failValid(vcpu,
			launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
			       : VMXERR_VMRESUME_NONLAUNCHED_VMCS);

	if (nested_vmx_check_controls(vcpu, vmcs12))
		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	if (nested_vmx_check_host_state(vcpu, vmcs12))
		return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);

	/*
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
	vmx->nested.nested_run_pending = 1;
	status = nested_vmx_enter_non_root_mode(vcpu, true);
	if (unlikely(status != NVMX_VMENTRY_SUCCESS))
		goto vmentry_failed;

	/* Hide L1D cache contents from the nested guest.  */
	vmx->vcpu.arch.l1tf_flush_l1d = true;

	/*
	 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
	 * also be used as part of restoring nVMX state for
	 * snapshot restore (migration).
	 *
	 * In this flow, it is assumed that vmcs12 cache was
	 * trasferred as part of captured nVMX state and should
	 * therefore not be read from guest memory (which may not
	 * exist on destination host yet).
	 */
	nested_cache_shadow_vmcs12(vcpu, vmcs12);

	/*
	 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
	 * awakened by event injection or by an NMI-window VM-exit or
	 * by an interrupt-window VM-exit, halt the vcpu.
	 */
	if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
	    !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
	    !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
	    !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
	      (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
		vmx->nested.nested_run_pending = 0;
		return kvm_vcpu_halt(vcpu);
	}
	return 1;

vmentry_failed:
	vmx->nested.nested_run_pending = 0;
	if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
		return 0;
	if (status == NVMX_VMENTRY_VMEXIT)
		return 1;
	WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
	return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
}
/*
 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
 * This function returns the new value we should put in vmcs12.guest_cr0.
 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
 *  1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
 *     available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
 *     didn't trap the bit, because if L1 did, so would L0).
 *  2. Bits that L1 asked to trap (and therefore L0 also did) could not have
 *     been modified by L2, and L1 knows it. So just leave the old value of
 *     the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
 *     isn't relevant, because if L0 traps this bit it can set it to anything.
 *  3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
 *     changed these bits, and therefore they need to be updated, but L0
 *     didn't necessarily allow them to be changed in GUEST_CR0 - and rather
 *     put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
 */
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
	/*3*/	(vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
			vcpu->arch.cr0_guest_owned_bits));
}

static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	return
	/*1*/	(vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
	/*2*/	(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
	/*3*/	(vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
			vcpu->arch.cr4_guest_owned_bits));
}
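
/*
 * Illustration (explanatory note, not part of the original source): take a
 * CR0 bit that L0 intercepts but L1 does not (case 3 above).  When L2 writes
 * it, L0 handles the exit and stores the value L2 expects to see in vmcs02's
 * CR0_READ_SHADOW, while GUEST_CR0 may hold whatever L0 actually runs with.
 * The helpers above therefore pull such bits from the read shadow when
 * reconstructing vmcs12.guest_cr0/guest_cr4 for L1.
 */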
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;

	if (vcpu->arch.exception.injected) {
		nr = vcpu->arch.exception.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (kvm_exception_is_soft(nr)) {
			vmcs12->vm_exit_instruction_len =
				vcpu->arch.event_exit_inst_len;
			idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;

		if (vcpu->arch.exception.has_error_code) {
			idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
			vmcs12->idt_vectoring_error_code =
				vcpu->arch.exception.error_code;
		}

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	} else if (vcpu->arch.nmi_injected) {
		vmcs12->idt_vectoring_info_field =
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
	} else if (vcpu->arch.interrupt.injected) {
		nr = vcpu->arch.interrupt.nr;
		idt_vectoring = nr | VECTORING_INFO_VALID_MASK;

		if (vcpu->arch.interrupt.soft) {
			idt_vectoring |= INTR_TYPE_SOFT_INTR;
			vmcs12->vm_entry_instruction_len =
				vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;

		vmcs12->idt_vectoring_info_field = idt_vectoring;
	}
}
void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gfn_t gfn;

	/*
	 * Don't need to mark the APIC access page dirty; it is never
	 * written to by the CPU during APIC virtualization.
	 */

	if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
		gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	if (nested_cpu_has_posted_intr(vmcs12)) {
		gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}
}
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	void *vapic_page;
	u16 status;

	if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
		return;

	vmx->nested.pi_pending = false;
	if (!pi_test_and_clear_on(vmx->nested.pi_desc))
		return;

	max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
	if (max_irr != 256) {
		vapic_page = vmx->nested.virtual_apic_map.hva;
		if (!vapic_page)
			return;

		__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
			vapic_page, &max_irr);
		status = vmcs_read16(GUEST_INTR_STATUS);
		if ((u8)max_irr > ((u8)status & 0xff)) {
			status &= ~0xff;
			status |= (u8)max_irr;
			vmcs_write16(GUEST_INTR_STATUS, status);
		}
	}

	nested_mark_vmcs12_pages_dirty(vcpu);
}

static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
					       unsigned long exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (vcpu->arch.exception.has_error_code) {
		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (kvm_exception_is_soft(nr))
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
	    vmx_get_nmi_mask(vcpu))
		intr_info |= INTR_INFO_UNBLOCK_NMI;

	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}

/*
 * Returns true if a debug trap is pending delivery.
 *
 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
 * exception may be inferred from the presence of an exception payload.
 */
static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending &&
	       vcpu->arch.exception.nr == DB_VECTOR &&
	       vcpu->arch.exception.payload;
}

/*
 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
 * represents these debug traps with a payload that is said to be compatible
 * with the 'pending debug exceptions' field, write the payload to the VMCS
 * field if a VM-exit is delivered before the debug trap.
 */
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
{
	if (vmx_pending_dbg_trap(vcpu))
		vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
			    vcpu->arch.exception.payload);
}

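/*
 * Events are considered below in roughly architectural priority order: INIT,
 * exceptions that are not debug traps, Monitor Trap Flag, remaining
 * exceptions, the VMX-preemption timer, NMIs and finally external
 * interrupts. -EBUSY is returned when a nested VM-exit is warranted but
 * cannot be delivered yet (e.g. a nested VM-entry is still pending).
 */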
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long exit_qual;
	bool block_nested_events =
	    vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
	bool mtf_pending = vmx->nested.mtf_pending;
	struct kvm_lapic *apic = vcpu->arch.apic;

	/*
	 * Clear the MTF state. If a higher priority VM-exit is delivered first,
	 * this state is discarded.
	 */
	if (!block_nested_events)
		vmx->nested.mtf_pending = false;

	if (lapic_in_kernel(vcpu) &&
		test_bit(KVM_APIC_INIT, &apic->pending_events)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_update_pending_dbg(vcpu);
		clear_bit(KVM_APIC_INIT, &apic->pending_events);
		nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
		return 0;
	}

	/*
	 * Process any exceptions that are not debug traps before MTF.
	 */
	if (vcpu->arch.exception.pending &&
	    !vmx_pending_dbg_trap(vcpu) &&
	    nested_vmx_check_exception(vcpu, &exit_qual)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (mtf_pending) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_update_pending_dbg(vcpu);
		nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
		return 0;
	}

	if (vcpu->arch.exception.pending &&
	    nested_vmx_check_exception(vcpu, &exit_qual)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}

	if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
	    vmx->nested.preemption_timer_expired) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}

	if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  NMI_VECTOR | INTR_TYPE_NMI_INTR |
				  INTR_INFO_VALID_MASK, 0);
		/*
		 * The NMI-triggered VM exit counts as injection:
		 * clear this one and block further NMIs.
		 */
		vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}

	if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}

	vmx_complete_nested_posted_interrupt(vcpu);
	return 0;
}

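/*
 * The remaining time is converted from nanoseconds to TSC cycles
 * (ns * virtual_tsc_khz / 10^6) and then scaled down by the emulated
 * VMX_MISC preemption-timer rate of 2^5 TSC cycles per timer tick.
 * For illustration, with 1 ms remaining on a 2 GHz virtual TSC
 * (virtual_tsc_khz == 2000000): 1000000 * 2000000 / 1000000 =
 * 2000000 cycles, which >> 5 gives 62500 preemption-timer units.
 */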
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}

static bool is_vmcs12_ext_field(unsigned long field)
{
	switch (field) {
	case GUEST_ES_SELECTOR:
	case GUEST_CS_SELECTOR:
	case GUEST_SS_SELECTOR:
	case GUEST_DS_SELECTOR:
	case GUEST_FS_SELECTOR:
	case GUEST_GS_SELECTOR:
	case GUEST_LDTR_SELECTOR:
	case GUEST_TR_SELECTOR:
	case GUEST_ES_LIMIT:
	case GUEST_CS_LIMIT:
	case GUEST_SS_LIMIT:
	case GUEST_DS_LIMIT:
	case GUEST_FS_LIMIT:
	case GUEST_GS_LIMIT:
	case GUEST_LDTR_LIMIT:
	case GUEST_TR_LIMIT:
	case GUEST_GDTR_LIMIT:
	case GUEST_IDTR_LIMIT:
	case GUEST_ES_AR_BYTES:
	case GUEST_DS_AR_BYTES:
	case GUEST_FS_AR_BYTES:
	case GUEST_GS_AR_BYTES:
	case GUEST_LDTR_AR_BYTES:
	case GUEST_TR_AR_BYTES:
	case GUEST_ES_BASE:
	case GUEST_CS_BASE:
	case GUEST_SS_BASE:
	case GUEST_DS_BASE:
	case GUEST_FS_BASE:
	case GUEST_GS_BASE:
	case GUEST_LDTR_BASE:
	case GUEST_TR_BASE:
	case GUEST_GDTR_BASE:
	case GUEST_IDTR_BASE:
	case GUEST_PENDING_DBG_EXCEPTIONS:
	case GUEST_BNDCFGS:
		return true;
	default:
		break;
	}

	return false;
}

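/*
 * The "rare" guest-state fields handled below are ones L1 is unlikely to
 * read on every exit (segment registers, descriptor tables, pending debug
 * exceptions, BNDCFGS). Copying them out of vmcs02 is deferred via
 * need_sync_vmcs02_to_vmcs12_rare and performed lazily by
 * copy_vmcs02_to_vmcs12_rare() when something actually needs them.
 */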
static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
	vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
	vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
	vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
	vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
	vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
	vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
	vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
	vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
	vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
	vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
	vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
	vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
	vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
	vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
	vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
	vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
	vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
	vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
	vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
	vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
	vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
	vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
	vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
	vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
	vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
	vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
	vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
	vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
	vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
	vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
	vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
	vmcs12->guest_pending_dbg_exceptions =
		vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
	if (kvm_mpx_supported())
		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
}

static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
		return;

	WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);

	cpu = get_cpu();
	vmx->loaded_vmcs = &vmx->nested.vmcs02;
	vmx_vcpu_load(&vmx->vcpu, cpu);

	sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	vmx->loaded_vmcs = &vmx->vmcs01;
	vmx_vcpu_load(&vmx->vcpu, cpu);
	put_cpu();
}

/*
 * Update the guest state fields of vmcs12 to reflect changes that
 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
 * VM-entry controls is also updated, since this is really a guest
 * state bit.)
 */
static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.hv_evmcs)
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;

	vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
	vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);

	vmcs12->guest_rsp = kvm_rsp_read(vcpu);
	vmcs12->guest_rip = kvm_rip_read(vcpu);
	vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);

	vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
	vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);

	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);

	vmcs12->guest_interruptibility_info =
		vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);

	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
		vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;

	if (nested_cpu_has_preemption_timer(vmcs12) &&
	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
		vmcs12->vmx_preemption_timer_value =
			vmx_get_preemption_timer_value(vcpu);

	/*
	 * In some cases (usually, nested EPT), L2 is allowed to change its
	 * own CR3 without exiting. If it has changed it, we must keep it.
	 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
	 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 *
	 * Additionally, restore L2's PDPTR to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
		if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
			vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
			vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
			vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
			vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
		}
	}

	vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);

	if (nested_cpu_has_vid(vmcs12))
		vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);

	vmcs12->vm_entry_controls =
		(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
		(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
		kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);

	if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
		vmcs12->guest_ia32_efer = vcpu->arch.efer;
}

/*
 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
 * and this function updates it to reflect the changes to the guest state while
 * L2 was running (and perhaps made some exits which were handled directly by L0
 * without going back to L1), and to reflect the exit reason.
 * Note that we do not have to copy here all VMCS fields, just those that
 * could have changed by the L2 guest or the exit - i.e., the guest-state and
 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
 * which already writes to vmcs12 directly.
 */
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
			   u32 exit_reason, u32 exit_intr_info,
			   unsigned long exit_qualification)
{
	/* update exit information fields: */
	vmcs12->vm_exit_reason = exit_reason;
	vmcs12->exit_qualification = exit_qualification;
	vmcs12->vm_exit_intr_info = exit_intr_info;

	vmcs12->idt_vectoring_info_field = 0;
	vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);

	if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
		vmcs12->launch_state = 1;

		/* vm_entry_intr_info_field is cleared on exit. Emulate this
		 * instead of reading the real value. */
		vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;

		/*
		 * Transfer the event that L0 or L1 may have wanted to inject
		 * into L2 to IDT_VECTORING_INFO_FIELD.
		 */
		vmcs12_save_pending_event(vcpu, vmcs12);

		/*
		 * According to spec, there's no need to store the guest's
		 * MSRs if the exit is due to a VM-entry failure that occurs
		 * during or after loading the guest state. Since this exit
		 * does not fall in that category, we need to save the MSRs.
		 */
		if (nested_vmx_store_msr(vcpu,
					 vmcs12->vm_exit_msr_store_addr,
					 vmcs12->vm_exit_msr_store_count))
			nested_vmx_abort(vcpu,
					 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	/*
	 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
	 * preserved above and would only end up incorrectly in L1.
	 */
	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);
}

/*
 * A part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent, is to reset L1's guest state to the host state specified
 * in vmcs12.
 * This function is to be called not only on normal nested exit, but also on
 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
 * Failures During or After Loading Guest State").
 * This function should be called when the active VMCS is L1's (vmcs01).
 */
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
				   struct vmcs12 *vmcs12)
{
	struct kvm_segment seg;
	u32 entry_failure_code;

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vcpu->arch.efer = vmcs12->host_ia32_efer;
	else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
	vmx_set_efer(vcpu, vcpu->arch.efer);

	kvm_rsp_write(vcpu, vmcs12->host_rsp);
	kvm_rip_write(vcpu, vmcs12->host_rip);
	vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
	vmx_set_interrupt_shadow(vcpu, 0);

	/*
	 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
	 * actually changed, because vmx_set_cr0 refers to efer set above.
	 *
	 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
	 * (KVM doesn't change it);
	 */
	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmx_set_cr0(vcpu, vmcs12->host_cr0);

	/* Same as above - no reason to call set_cr4_guest_host_mask(). */
	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs12->host_cr4);

	nested_ept_uninit_mmu_context(vcpu);

	/*
	 * Only PDPTE load can fail as the value of cr3 was checked on entry and
	 * couldn't have changed.
	 */
	if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

	/*
	 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
	 * VMEntry/VMExit. Thus, no need to flush TLB.
	 *
	 * If vmcs12 doesn't use VPID, L1 expects TLB to be
	 * flushed on every VMEntry/VMExit.
	 *
	 * Otherwise, we can preserve TLB entries as long as we are
	 * able to tag L1 TLB entries differently than L2 TLB entries.
	 *
	 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
	 * and therefore we request the TLB flush to happen only after VMCS EPTP
	 * has been set by KVM_REQ_LOAD_MMU_PGD.
	 */
	if (enable_vpid &&
	    (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}

	vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
	vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);

	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
		vmcs_write64(GUEST_BNDCFGS, 0);

	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
		vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
		vcpu->arch.pat = vmcs12->host_ia32_pat;
	}
	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
					 vmcs12->host_ia32_perf_global_ctrl));

	/* Set L1 segment info according to Intel SDM
	    27.5.2 Loading Host Segment and Descriptor-Table Registers */
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.selector = vmcs12->host_cs_selector,
		.type = 11,
		.present = 1,
		.s = 1,
		.g = 1
	};
	if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
		seg.l = 1;
	else
		seg.db = 1;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
	seg = (struct kvm_segment) {
		.base = 0,
		.limit = 0xFFFFFFFF,
		.type = 3,
		.present = 1,
		.s = 1,
		.db = 1,
		.g = 1
	};
	seg.selector = vmcs12->host_ds_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
	seg.selector = vmcs12->host_es_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
	seg.selector = vmcs12->host_ss_selector;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
	seg.selector = vmcs12->host_fs_selector;
	seg.base = vmcs12->host_fs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
	seg.selector = vmcs12->host_gs_selector;
	seg.base = vmcs12->host_gs_base;
	vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
	seg = (struct kvm_segment) {
		.base = vmcs12->host_tr_base,
		.limit = 0x67,
		.selector = vmcs12->host_tr_selector,
		.type = 11,
		.present = 1
	};
	vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);

	kvm_set_dr(vcpu, 7, 0x400);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
				vmcs12->vm_exit_msr_load_count))
		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}

static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
{
	struct shared_msr_entry *efer_msr;
	unsigned int i;

	if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
		return vmcs_read64(GUEST_IA32_EFER);

	if (cpu_has_load_ia32_efer())
		return host_efer;

	for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
		if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
			return vmx->msr_autoload.guest.val[i].value;
	}

	efer_msr = find_msr_entry(vmx, MSR_EFER);
	if (efer_msr)
		return efer_msr->data;

	return host_efer;
}

static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmx_msr_entry g, h;
	gpa_t gpa;
	u32 i, j;

	vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);

	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
		/*
		 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
		 * as vmcs01.GUEST_DR7 contains a userspace defined value
		 * and vcpu->arch.dr7 is not squirreled away before the
		 * nested VMENTER (not worth adding a variable in nested_vmx).
		 */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			kvm_set_dr(vcpu, 7, DR7_FIXED_1);
		else
			WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
	}

	/*
	 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
	 * handle a variety of side effects to KVM's software model.
	 */
	vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));

	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
	vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));

	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
	vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));

	nested_ept_uninit_mmu_context(vcpu);
	vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
	kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

	/*
	 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
	 * from vmcs01 (if necessary). The PDPTRs are not loaded on
	 * VMFail, like everything else we just need to ensure our
	 * software model is up-to-date.
	 */
	if (enable_ept)
		ept_save_pdptrs(vcpu);

	kvm_mmu_reset_context(vcpu);

	if (cpu_has_vmx_msr_bitmap())
		vmx_update_msr_bitmap(vcpu);

	/*
	 * This nasty bit of open coding is a compromise between blindly
	 * loading L1's MSRs using the exit load lists (incorrect emulation
	 * of VMFail), leaving the nested VM's MSRs in the software model
	 * (incorrect behavior) and snapshotting the modified MSRs (too
	 * expensive since the lists are unbound by hardware). For each
	 * MSR that was (prematurely) loaded from the nested VMEntry load
	 * list, reload it from the exit load list if it exists and differs
	 * from the guest value. The intent is to stuff host state as
	 * silently as possible, not to fully process the exit load list.
	 */
	for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
		gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
		if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
			pr_debug_ratelimited(
				"%s read MSR index failed (%u, 0x%08llx)\n",
				__func__, i, gpa);
			goto vmabort;
		}

		for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
			gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
			if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
				pr_debug_ratelimited(
					"%s read MSR failed (%u, 0x%08llx)\n",
					__func__, j, gpa);
				goto vmabort;
			}
			if (h.index != g.index)
				continue;
			if (h.value == g.value)
				break;

			if (nested_vmx_load_msr_check(vcpu, &h)) {
				pr_debug_ratelimited(
					"%s check failed (%u, 0x%x, 0x%x)\n",
					__func__, j, h.index, h.reserved);
				goto vmabort;
			}

			if (kvm_set_msr(vcpu, h.index, h.value)) {
				pr_debug_ratelimited(
					"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
					__func__, j, h.index, h.value);
				goto vmabort;
			}
		}
	}

	return;

vmabort:
	nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}

/*
 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
 * and modify vmcs12 to make it see what it would expect to see there if
 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()).
 */
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	/* trying to cancel vmlaunch/vmresume is a bug */
	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	leave_guest_mode(vcpu);

	if (nested_cpu_has_preemption_timer(vmcs12))
		hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);

	if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
		vcpu->arch.tsc_offset -= vmcs12->tsc_offset;

	if (likely(!vmx->fail)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);

		if (exit_reason != -1)
			prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
				       exit_qualification);

		/*
		 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
		 * also be used to capture vmcs12 cache as part of
		 * capturing nVMX state for snapshot (migration).
		 *
		 * Otherwise, this flush will dirty guest memory at a
		 * point it is already assumed by user-space to be
		 * immutable.
		 */
		nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
	} else {
		/*
		 * The only expected VM-instruction error is "VM entry with
		 * invalid control field(s)." Anything else indicates a
		 * problem with L0. And we should never get here with a
		 * VMFail of any type if early consistency checks are enabled.
		 */
		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		WARN_ON_ONCE(nested_early_check);
	}

	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	/* Update any VMCS fields that might have changed while L2 ran */
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
	if (vmx->nested.l1_tpr_threshold != -1)
		vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);

	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (vmx->nested.change_vmcs01_virtual_apic_mode) {
		vmx->nested.change_vmcs01_virtual_apic_mode = false;
		vmx_set_virtual_apic_mode(vcpu);
	}

	/* Unpin physical memory we referred to in vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	/*
	 * We are now running in L2, mmu_notifier will force to reload the
	 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
	 */
	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
		vmx->nested.need_vmcs12_to_shadow_sync = true;

	/* in case we halted in L2 */
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	if (likely(!vmx->fail)) {
		if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
		    nested_exit_intr_ack_set(vcpu)) {
			int irq = kvm_cpu_get_interrupt(vcpu);
			WARN_ON(irq < 0);
			vmcs12->vm_exit_intr_info = irq |
				INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
		}

		if (exit_reason != -1)
			trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
						       vmcs12->exit_qualification,
						       vmcs12->idt_vectoring_info_field,
						       vmcs12->vm_exit_intr_info,
						       vmcs12->vm_exit_intr_error_code,
						       KVM_ISA_VMX);

		load_vmcs12_host_state(vcpu, vmcs12);

		return;
	}

	/*
	 * After an early L2 VM-entry failure, we're now back
	 * in L1 which thinks it just finished a VMLAUNCH or
	 * VMRESUME instruction, so we need to set the failure
	 * flag and the VM-instruction error field of the VMCS
	 * accordingly, and skip the emulated instruction.
	 */
	(void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);

	/*
	 * Restore L1's host state to KVM's software model. We're here
	 * because a consistency check was caught by hardware, which
	 * means some amount of guest state has been propagated to KVM's
	 * model and needs to be unwound to the host's state.
	 */
	nested_vmx_restore_host_state(vcpu);

	vmx->fail = 0;
}

/*
 * Decode the memory-address operand of a vmx instruction, as recorded on an
 * exit caused by such an instruction (run by a guest hypervisor).
 * On success, returns 0. When the operand is invalid, returns 1 and throws
 * #UD, #GP or #SS.
 */
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
{
	gva_t off;
	bool exn;
	struct kvm_segment s;

	/*
	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
	 * Execution", on an exit, vmx_instruction_info holds most of the
	 * addressing components of the operand. Only the displacement part
	 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
	 * For how an actual address is calculated from all these components,
	 * refer to Vol. 1, "Operand Addressing".
	 */
	int  scaling = vmx_instruction_info & 3;
	int  addr_size = (vmx_instruction_info >> 7) & 7;
	bool is_reg = vmx_instruction_info & (1u << 10);
	int  seg_reg = (vmx_instruction_info >> 15) & 7;
	int  index_reg = (vmx_instruction_info >> 18) & 0xf;
	bool index_is_valid = !(vmx_instruction_info & (1u << 22));
	int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
	bool base_is_valid  = !(vmx_instruction_info & (1u << 27));

	if (is_reg) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* Addr = segment_base + offset */
	/* offset = base + [index * scale] + displacement */
	off = exit_qualification; /* holds the displacement */
	if (addr_size == 1)
		off = (gva_t)sign_extend64(off, 31);
	else if (addr_size == 0)
		off = (gva_t)sign_extend64(off, 15);
	if (base_is_valid)
		off += kvm_register_read(vcpu, base_reg);
	if (index_is_valid)
		off += kvm_register_read(vcpu, index_reg) << scaling;
	vmx_get_segment(vcpu, &s, seg_reg);

	/*
	 * The effective address, i.e. @off, of a memory operand is truncated
	 * based on the address size of the instruction. Note that this is
	 * the *effective address*, i.e. the address prior to accounting for
	 * the segment's base.
	 */
	if (addr_size == 1) /* 32 bit */
		off &= 0xffffffff;
	else if (addr_size == 0) /* 16 bit */
		off &= 0xffff;

	/* Checks for #GP/#SS exceptions. */
	exn = false;
	if (is_long_mode(vcpu)) {
		/*
		 * The virtual/linear address is never truncated in 64-bit
		 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
		 * address when using FS/GS with a non-zero base.
		 */
		if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
			*ret = s.base + off;
		else
			*ret = off;

		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
		 * non-canonical form. This is the only check on the memory
		 * destination for long mode!
		 */
		exn = is_noncanonical_address(*ret, vcpu);
	} else {
		/*
		 * When not in long mode, the virtual/linear address is
		 * unconditionally truncated to 32 bits regardless of the
		 * address size.
		 */
		*ret = (s.base + off) & 0xffffffff;

		/* Protected mode: apply checks for segment validity in the
		 * following order:
		 * - segment type check (#GP(0) may be thrown)
		 * - usability check (#GP(0)/#SS(0))
		 * - limit check (#GP(0)/#SS(0))
		 */
		if (wr)
			/* #GP(0) if the destination operand is located in a
			 * read-only data segment or any code segment.
			 */
			exn = ((s.type & 0xa) == 0 || (s.type & 8));
		else
			/* #GP(0) if the source operand is located in an
			 * execute-only code segment
			 */
			exn = ((s.type & 0xa) == 8);
		if (exn) {
			kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
			return 1;
		}
		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
		 */
		exn = (s.unusable != 0);

		/*
		 * Protected mode: #GP(0)/#SS(0) if the memory operand is
		 * outside the segment limit. All CPUs that support VMX ignore
		 * limit checks for flat segments, i.e. segments with base==0,
		 * limit==0xffffffff and of type expand-up data or code.
		 */
		if (!(s.base == 0 && s.limit == 0xffffffff &&
		     ((s.type & 8) || !(s.type & 4))))
			exn = exn || ((u64)off + len - 1 > s.limit);
	}
	if (exn) {
		kvm_queue_exception_e(vcpu,
				      seg_reg == VCPU_SREG_SS ?
						SS_VECTOR : GP_VECTOR,
				      0);
		return 1;
	}

	return 0;
}

void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx;

	if (!nested_vmx_allowed(vcpu))
		return;

	vmx = to_vmx(vcpu);
	if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
		vmx->nested.msrs.entry_ctls_high |=
				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		vmx->nested.msrs.exit_ctls_high |=
				VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	} else {
		vmx->nested.msrs.entry_ctls_high &=
				~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
		vmx->nested.msrs.exit_ctls_high &=
				~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
	}
}

static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
{
	gva_t gva;
	struct x86_exception e;

	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
				vmcs_read32(VMX_INSTRUCTION_INFO), false,
				sizeof(*vmpointer), &gva))
		return 1;

	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	return 0;
}

/*
 * Allocate a shadow VMCS and associate it with the currently loaded
 * VMCS, unless such a shadow VMCS already exists. The newly allocated
 * VMCS is also VMCLEARed, so that it is ready for use.
 */
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;

	/*
	 * We should allocate a shadow vmcs for vmcs01 only when L1
	 * executes VMXON and free it when L1 executes VMXOFF.
	 * As it is invalid to execute VMXON twice, we shouldn't reach
	 * here when vmcs01 already have an allocated shadow vmcs.
	 */
	WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);

	if (!loaded_vmcs->shadow_vmcs) {
		loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
		if (loaded_vmcs->shadow_vmcs)
			vmcs_clear(loaded_vmcs->shadow_vmcs);
	}
	return loaded_vmcs->shadow_vmcs;
}

static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
	if (r < 0)
		goto out_vmcs02;

	vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_vmcs12)
		goto out_cached_vmcs12;

	vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
	if (!vmx->nested.cached_shadow_vmcs12)
		goto out_cached_shadow_vmcs12;

	if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
		goto out_shadow_vmcs;

	hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED);
	vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;

	vmx->nested.vpid02 = allocate_vpid();

	vmx->nested.vmcs02_initialized = false;
	vmx->nested.vmxon = true;

	if (vmx_pt_mode_is_host_guest()) {
		vmx->pt_desc.guest.ctl = 0;
		pt_update_intercept_for_msr(vmx);
	}

	return 0;

out_shadow_vmcs:
	kfree(vmx->nested.cached_shadow_vmcs12);

out_cached_shadow_vmcs12:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_loaded_vmcs(&vmx->nested.vmcs02);

out_vmcs02:
	return -ENOMEM;
}

/*
 * Emulate the VMXON instruction.
 * Currently, we just remember that VMX is active, and do not save or even
 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
 * do not currently need to store anything in that guest-allocated memory
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
 * argument is different from the VMXON pointer (which the spec says they do).
 */
static int handle_vmon(struct kvm_vcpu *vcpu)
{
	int ret;
	gpa_t vmptr;
	uint32_t revision;
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
		| FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;

	/*
	 * The Intel VMX Instruction Reference lists a bunch of bits that are
	 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
	 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
	 * Otherwise, we should fail with #UD. But most faulting conditions
	 * have already been checked by hardware, prior to the VM-exit for
	 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
	 * that bit set to 1 in non-root mode.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	/* CPL=0 must be checked manually. */
	if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (vmx->nested.vmxon)
		return nested_vmx_failValid(vcpu,
			VMXERR_VMXON_IN_VMX_ROOT_OPERATION);

	if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
			!= VMXON_NEEDED_FEATURES) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	/*
	 * The first 4 bytes of VMXON region contain the supported
	 * VMCS revision identifier.
	 *
	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
	 * which replaces physical address width with 32.
	 */
	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failInvalid(vcpu);

	if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
	    revision != VMCS12_REVISION)
		return nested_vmx_failInvalid(vcpu);

	vmx->nested.vmxon_ptr = vmptr;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	return nested_vmx_succeed(vcpu);
}

static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->nested.current_vmptr == -1ull)
		return;

	copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));

	if (enable_shadow_vmcs) {
		/* copy to memory all shadowed fields in case
		   they were modified */
		copy_shadow_to_vmcs12(vmx);
		vmx_disable_shadow_vmcs(vmx);
	}
	vmx->nested.posted_intr_nv = -1;

	/* Flush VMCS12 to guest memory */
	kvm_vcpu_write_guest_page(vcpu,
				  vmx->nested.current_vmptr >> PAGE_SHIFT,
				  vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	vmx->nested.current_vmptr = -1ull;
}

/* Emulate the VMXOFF instruction */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
	if (!nested_vmx_check_permission(vcpu))
		return 1;

	free_nested(vcpu);

	/* Process a latched INIT during time CPU was in VMX operation */
	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 zero = 0;
	gpa_t vmptr;
	u64 evmcs_gpa;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failValid(vcpu,
			VMXERR_VMCLEAR_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_failValid(vcpu,
			VMXERR_VMCLEAR_VMXON_POINTER);

	/*
	 * When Enlightened VMEntry is enabled on the calling CPU we treat the
	 * memory area pointed to by vmptr as an Enlightened VMCS (as there's
	 * no good way to distinguish it from VMCS12) and we must not corrupt
	 * it by writing to the non-existent 'launch_state' field. The area
	 * doesn't have to be the currently active EVMCS on the calling CPU and
	 * there's nothing KVM has to do to transition it from 'active' to
	 * 'non-active' state. It is possible that the area will stay mapped as
	 * vmx->nested.hv_evmcs but this shouldn't be a problem.
	 */
	if (likely(!vmx->nested.enlightened_vmcs_enabled ||
		   !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
		if (vmptr == vmx->nested.current_vmptr)
			nested_release_vmcs12(vcpu);

		kvm_vcpu_write_guest(vcpu,
				     vmptr + offsetof(struct vmcs12,
						      launch_state),
				     &zero, sizeof(zero));
	}

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMLAUNCH instruction */
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, true);
}

/* Emulate the VMRESUME instruction */
static int handle_vmresume(struct kvm_vcpu *vcpu)
{
	return nested_vmx_run(vcpu, false);
}

static int handle_vmread(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
	unsigned long field;
	u64 value;
	gva_t gva = 0;
	short offset;
	int len;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
	 * any VMREAD sets the ALU flags for VMfailInvalid.
	 */
	if (vmx->nested.current_vmptr == -1ull ||
	    (is_guest_mode(vcpu) &&
	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
		return nested_vmx_failInvalid(vcpu);

	/* Decode instruction info and find the field to read */
	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
		return nested_vmx_failValid(vcpu,
			VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	/* Read the field, zero-extended to a u64 value */
	value = vmcs12_read_any(vmcs12, field, offset);

	/*
	 * Now copy part of this value to register or memory, as requested.
	 * Note that the number of bits actually copied is 32 or 64 depending
	 * on the guest's mode (32 or 64 bit), not on the given field's length.
	 */
	if (instr_info & BIT(10)) {
		kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
	} else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
					instr_info, true, len, &gva))
			return 1;
		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
		if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
			kvm_inject_page_fault(vcpu, &e);
			return 1;
		}
	}

	return nested_vmx_succeed(vcpu);
}

static bool is_shadow_field_rw(unsigned long field)
{
	switch (field) {
#define SHADOW_FIELD_RW(x, y) case x:
#include "vmcs_shadow_fields.h"
		return true;
	default:
		break;
	}
	return false;
}

static bool is_shadow_field_ro(unsigned long field)
{
	switch (field) {
#define SHADOW_FIELD_RO(x, y) case x:
#include "vmcs_shadow_fields.h"
		return true;
	default:
		break;
	}
	return false;
}

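/*
 * The two predicates above are generated from vmcs_shadow_fields.h, i.e.
 * they mirror exactly the fields the shadow VMCS exposes to L1 for direct
 * access. handle_vmwrite() uses them to decide whether a write must be
 * reflected into the shadow VMCS and whether it needs to dirty vmcs12 so
 * the field is re-read on the next nested VM-entry.
 */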
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
						    : get_vmcs12(vcpu);
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct x86_exception e;
	unsigned long field;
	short offset;
	gva_t gva;
	int len;

	/*
	 * The value to write might be 32 or 64 bits, depending on L1's long
	 * mode, and eventually we need to write that into a field of several
	 * possible lengths. The code below first zero-extends the value to 64
	 * bit (value), and then copies only the appropriate number of
	 * bits into the vmcs12 field.
	 */
	u64 value = 0;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/*
	 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
	 * any VMWRITE sets the ALU flags for VMfailInvalid.
	 */
	if (vmx->nested.current_vmptr == -1ull ||
	    (is_guest_mode(vcpu) &&
	     get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
		return nested_vmx_failInvalid(vcpu);

	if (instr_info & BIT(10))
		value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
	else {
		len = is_64_bit_mode(vcpu) ? 8 : 4;
		if (get_vmx_mem_address(vcpu, exit_qualification,
					instr_info, false, len, &gva))
			return 1;
		if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
			kvm_inject_page_fault(vcpu, &e);
			return 1;
		}
	}

	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));

	offset = vmcs_field_to_offset(field);
	if (offset < 0)
		return nested_vmx_failValid(vcpu,
			VMXERR_UNSUPPORTED_VMCS_COMPONENT);

	/*
	 * If the vCPU supports "VMWRITE to any supported field in the
	 * VMCS," then the "read-only" fields are actually read/write.
	 */
	if (vmcs_field_readonly(field) &&
	    !nested_cpu_has_vmwrite_any_field(vcpu))
		return nested_vmx_failValid(vcpu,
			VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);

	/*
	 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
	 * vmcs12, else we may crush a field or consume a stale value.
	 */
	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
		copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);

	/*
	 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
	 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
	 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
	 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
	 * from L1 will return a different value than VMREAD from L2 (L1 sees
	 * the stripped down value, L2 sees the full value as stored by KVM).
	 */
	if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
		value &= 0x1f0ff;

	vmcs12_write_any(vmcs12, field, offset, value);

	/*
	 * Do not track vmcs12 dirty-state if in guest-mode as we actually
	 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
	 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
	 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
	 */
	if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
		/*
		 * L1 can read these fields without exiting, ensure the
		 * shadow VMCS is up-to-date.
		 */
		if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
			preempt_disable();
			vmcs_load(vmx->vmcs01.shadow_vmcs);

			__vmcs_writel(field, value);

			vmcs_clear(vmx->vmcs01.shadow_vmcs);
			vmcs_load(vmx->loaded_vmcs->vmcs);
			preempt_enable();
		}
		vmx->nested.dirty_vmcs12 = true;
	}

	return nested_vmx_succeed(vcpu);
}

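/*
 * Making a VMCS current also wires up shadow VMCS support: vmcs01's physical
 * shadow VMCS is linked via VMCS_LINK_POINTER so that L1's VMREAD/VMWRITE can
 * be satisfied without an exit, and a vmcs12-to-shadow sync is requested for
 * the next entry.
 */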
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
{
	vmx->nested.current_vmptr = vmptr;
	if (enable_shadow_vmcs) {
		secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
		vmcs_write64(VMCS_LINK_POINTER,
			     __pa(vmx->vmcs01.shadow_vmcs));
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	}
	vmx->nested.dirty_vmcs12 = true;
}

/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	gpa_t vmptr;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (nested_vmx_get_vmptr(vcpu, &vmptr))
		return 1;

	if (!page_address_valid(vcpu, vmptr))
		return nested_vmx_failValid(vcpu,
			VMXERR_VMPTRLD_INVALID_ADDRESS);

	if (vmptr == vmx->nested.vmxon_ptr)
		return nested_vmx_failValid(vcpu,
			VMXERR_VMPTRLD_VMXON_POINTER);

	/* Forbid normal VMPTRLD if Enlightened version was used */
	if (vmx->nested.hv_evmcs)
		return 1;

	if (vmx->nested.current_vmptr != vmptr) {
		struct kvm_host_map map;
		struct vmcs12 *new_vmcs12;

		if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
			/*
			 * Reads from an unbacked page return all 1s,
			 * which means that the 32 bits located at the
			 * given physical address won't match the required
			 * VMCS12_REVISION identifier.
			 */
			return nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		new_vmcs12 = map.hva;

		if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    (new_vmcs12->hdr.shadow_vmcs &&
		     !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
			kvm_vcpu_unmap(vcpu, &map, false);
			return nested_vmx_failValid(vcpu,
				VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
		}

		nested_release_vmcs12(vcpu);

		/*
		 * Load VMCS12 from guest memory since it is not already
		 * cached.
		 */
		memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
		kvm_vcpu_unmap(vcpu, &map, false);

		set_current_vmptr(vmx, vmptr);
	}

	return nested_vmx_succeed(vcpu);
}

/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
	u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
	struct x86_exception e;
	gva_t gva;

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
		return 1;

	if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
				true, sizeof(gpa_t), &gva))
		return 1;
	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
					sizeof(gpa_t), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	return nested_vmx_succeed(vcpu);
}

/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info, types;
	unsigned long type;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 eptp, gpa;
	} operand;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_EPT) ||
	    !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* According to the Intel VMX instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}

	switch (type) {
	case VMX_EPT_EXTENT_GLOBAL:
	case VMX_EPT_EXTENT_CONTEXT:
	/*
	 * TODO: Sync the necessary shadow EPT roots here, rather than
	 * at the next emulated VM-entry.
	 */
		break;
	default:
		BUG_ON(1);
		break;
	}

	return nested_vmx_succeed(vcpu);
}

static int handle_invvpid(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vmx_instruction_info;
	unsigned long type, types;
	gva_t gva;
	struct x86_exception e;
	struct {
		u64 vpid;
		u64 gla;
	} operand;
	u16 vpid02;

	if (!(vmx->nested.msrs.secondary_ctls_high &
	      SECONDARY_EXEC_ENABLE_VPID) ||
	    !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	if (!nested_vmx_check_permission(vcpu))
		return 1;

	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);

	types = (vmx->nested.msrs.vpid_caps &
			VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;

	if (type >= 32 || !(types & (1 << type)))
		return nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	/* according to the intel vmx instruction reference, the memory
	 * operand is read even if it isn't needed (e.g., for type==global)
	 */
	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
			vmx_instruction_info, false, sizeof(operand), &gva))
		return 1;
	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
		kvm_inject_page_fault(vcpu, &e);
		return 1;
	}
	if (operand.vpid >> 16)
		return nested_vmx_failValid(vcpu,
			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);

	vpid02 = nested_get_vpid02(vcpu);
	switch (type) {
	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
		if (!operand.vpid ||
		    is_noncanonical_address(operand.gla, vcpu))
			return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		if (cpu_has_vmx_invvpid_individual_addr()) {
			__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
				vpid02, operand.gla);
		} else
			__vmx_flush_tlb(vcpu, vpid02, false);
		break;
	case VMX_VPID_EXTENT_SINGLE_CONTEXT:
	case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
		if (!operand.vpid)
			return nested_vmx_failValid(vcpu,
				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
		__vmx_flush_tlb(vcpu, vpid02, false);
		break;
	case VMX_VPID_EXTENT_ALL_CONTEXT:
		__vmx_flush_tlb(vcpu, vpid02, false);
		break;
	default:
		WARN_ON_ONCE(1);
		return kvm_skip_emulated_instruction(vcpu);
	}

	return nested_vmx_succeed(vcpu);
}

static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
				     struct vmcs12 *vmcs12)
{
	u32 index = kvm_rcx_read(vcpu);
	u64 new_eptp;
	bool accessed_dirty;
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!nested_cpu_has_eptp_switching(vmcs12) ||
	    !nested_cpu_has_ept(vmcs12))
		return 1;

	if (index >= VMFUNC_EPTP_ENTRIES)
		return 1;

	if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
				     &new_eptp, index * 8, 8))
		return 1;

	accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);

	/*
	 * If the (L2) guest does a vmfunc to the currently
	 * active ept pointer, we don't have to do anything else
	 */
	if (vmcs12->ept_pointer != new_eptp) {
		if (!nested_vmx_check_eptp(vcpu, new_eptp))
			return 1;

		kvm_mmu_unload(vcpu);
		mmu->ept_ad = accessed_dirty;
		mmu->mmu_role.base.ad_disabled = !accessed_dirty;
		vmcs12->ept_pointer = new_eptp;
		/*
		 * TODO: Check what's the correct approach in case
		 * mmu reload fails. Currently, we just let the next
		 * reload potentially fail
		 */
		kvm_mmu_reload(vcpu);
	}

	return 0;
}

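/*
 * EPTP switching (VM function 0) is the only VM function KVM emulates for
 * L1; any other function number, or a failure in the switch itself, is
 * reflected to L1 as a VM-exit.
 */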
static int handle_vmfunc(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 function = kvm_rax_read(vcpu);

	/*
	 * VMFUNC is only supported for nested guests, but we always enable the
	 * secondary control for simplicity; for non-nested mode, fake that we
	 * didn't enable it by injecting #UD.
	 */
	if (!is_guest_mode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcs12 = get_vmcs12(vcpu);
	if ((vmcs12->vm_function_control & (1 << function)) == 0)
		goto fail;

	switch (function) {
	case 0:
		if (nested_vmx_eptp_switching(vcpu, vmcs12))
			goto fail;
		break;
	default:
		goto fail;
	}
	return kvm_skip_emulated_instruction(vcpu);

fail:
	nested_vmx_vmexit(vcpu, vmx->exit_reason,
			  vmcs_read32(VM_EXIT_INTR_INFO),
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

/*
 * Return true if an IO instruction with the specified port and size should cause
 * a VM-exit into L1.
 */
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
				 int size)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	gpa_t bitmap, last_bitmap;
	u8 b;

	last_bitmap = (gpa_t)-1;
	b = -1;

	while (size > 0) {
		if (port < 0x8000)
			bitmap = vmcs12->io_bitmap_a;
		else if (port < 0x10000)
			bitmap = vmcs12->io_bitmap_b;
		else
			return true;
		bitmap += (port & 0x7fff) / 8;

		if (last_bitmap != bitmap)
			if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
				return true;
		if (b & (1 << (port & 7)))
			return true;

		port++;
		size--;
		last_bitmap = bitmap;
	}

	return false;
}

static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification;
	unsigned short port;
	int size;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

	port = exit_qualification >> 16;
	size = (exit_qualification & 7) + 1;

	return nested_vmx_check_io_bitmaps(vcpu, port, size);
}

/*
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
 * rather than handle it ourselves in L0. I.e., check whether L1 wants to
 * intercept the current event (a read or write of a specific MSR) via its
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
					struct vmcs12 *vmcs12, u32 exit_reason)
{
	u32 msr_index = kvm_rcx_read(vcpu);
	gpa_t bitmap;

	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return true;

	/*
	 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
	 * for the four combinations of read/write and low/high MSR numbers.
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
	if (exit_reason == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
		bitmap += 1024;
	}

	/* Then read the msr_index'th bit from this bitmap: */
	if (msr_index < 1024*8) {
		unsigned char b;
		if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
			return true;
		return 1 & (b >> (msr_index & 7));
	} else
		return true; /* let L1 handle the wrong parameter */
}

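/*
 * Worked example of the lookup above: a WRMSR to MSR_EFER (0xc0000080) lands
 * in the "write, high" quarter of the bitmap, i.e. bitmap + 2048 (write) +
 * 1024 (index >= 0xc0000000), with the relocated index 0x80 selecting byte
 * 0x10, bit 0.
 */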
5429 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5430 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5431 * intercept (via guest_host_mask etc.) the current event.
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	int cr = exit_qualification & 15;
	int reg;
	unsigned long val;

	switch ((exit_qualification >> 4) & 3) {
	case 0: /* mov to cr */
		reg = (exit_qualification >> 8) & 15;
		val = kvm_register_readl(vcpu, reg);
		switch (cr) {
		case 0:
			if (vmcs12->cr0_guest_host_mask &
			    (val ^ vmcs12->cr0_read_shadow))
				return true;
			break;
		case 3:
			if ((vmcs12->cr3_target_count >= 1 &&
					vmcs12->cr3_target_value0 == val) ||
				(vmcs12->cr3_target_count >= 2 &&
					vmcs12->cr3_target_value1 == val) ||
				(vmcs12->cr3_target_count >= 3 &&
					vmcs12->cr3_target_value2 == val) ||
				(vmcs12->cr3_target_count >= 4 &&
					vmcs12->cr3_target_value3 == val))
				return false;
			if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
				return true;
			break;
		case 4:
			if (vmcs12->cr4_guest_host_mask &
			    (vmcs12->cr4_read_shadow ^ val))
				return true;
			break;
		case 8:
			if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
				return true;
			break;
		}
		break;
	case 2: /* clts */
		if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
		    (vmcs12->cr0_read_shadow & X86_CR0_TS))
			return true;
		break;
	case 1: /* mov from cr */
		switch (cr) {
		case 3:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR3_STORE_EXITING)
				return true;
			break;
		case 8:
			if (vmcs12->cpu_based_vm_exec_control &
			    CPU_BASED_CR8_STORE_EXITING)
				return true;
			break;
		}
		break;
	case 3: /* lmsw */
		/*
		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
		 * cr0. Other attempted changes are ignored, with no exit.
		 */
		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		if (vmcs12->cr0_guest_host_mask & 0xe &
		    (val ^ vmcs12->cr0_read_shadow))
			return true;
		if ((vmcs12->cr0_guest_host_mask & 0x1) &&
		    !(vmcs12->cr0_read_shadow & 0x1) &&
		    (val & 0x1))
			return true;
		break;
	}
	return false;
}
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12, gpa_t bitmap)
{
	u32 vmx_instruction_info;
	unsigned long field;
	u8 b;

	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return true;

	/* Decode instruction info and find the field to access */
	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
	field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));

	/* Out-of-range fields always cause a VM exit from L2 to L1 */
	if (field >> 15)
		return true;

	if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
		return true;

	return 1 & (b >> (field & 7));
}
static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
{
	u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;

	if (nested_cpu_has_mtf(vmcs12))
		return true;

	/*
	 * An MTF VM-exit may be injected into the guest by setting the
	 * interruption-type to 7 (other event) and the vector field to 0. Such
	 * is the case regardless of the 'monitor trap flag' VM-execution
	 * control.
	 */
	return entry_intr_info == (INTR_INFO_VALID_MASK
				   | INTR_TYPE_OTHER_EVENT);
}
/*
 * Return true if we should exit from L2 to L1 to handle an exit, or false if
 * we should handle it ourselves in L0 (and then continue L2). Only call this
 * when in is_guest_mode (L2).
 */
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
{
	u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON_ONCE(vmx->nested.nested_run_pending);

	if (unlikely(vmx->fail)) {
		trace_kvm_nested_vmenter_failed(
			"hardware VM-instruction error: ",
			vmcs_read32(VM_INSTRUCTION_ERROR));
		return true;
	}

	trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
				vmcs_readl(EXIT_QUALIFICATION),
				vmx->idt_vectoring_info,
				intr_info,
				vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
				KVM_ISA_VMX);

	switch (exit_reason) {
	case EXIT_REASON_EXCEPTION_NMI:
		if (is_nmi(intr_info))
			return false;
		else if (is_page_fault(intr_info))
			return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
		else if (is_debug(intr_info) &&
			 vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			return false;
		else if (is_breakpoint(intr_info) &&
			 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		return vmcs12->exception_bitmap &
				(1u << (intr_info & INTR_INFO_VECTOR_MASK));
	case EXIT_REASON_EXTERNAL_INTERRUPT:
		return false;
	case EXIT_REASON_TRIPLE_FAULT:
		return true;
	case EXIT_REASON_INTERRUPT_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
	case EXIT_REASON_NMI_WINDOW:
		return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
	case EXIT_REASON_TASK_SWITCH:
		return true;
	case EXIT_REASON_CPUID:
		return true;
	case EXIT_REASON_HLT:
		return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
	case EXIT_REASON_INVD:
		return true;
	case EXIT_REASON_INVLPG:
		return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_RDPMC:
		return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
	case EXIT_REASON_RDRAND:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
	case EXIT_REASON_RDSEED:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
	case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
		return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
	case EXIT_REASON_VMREAD:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmread_bitmap);
	case EXIT_REASON_VMWRITE:
		return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
			vmcs12->vmwrite_bitmap);
	case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
	case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
	case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
		/*
		 * VMX instructions trap unconditionally. This allows L1 to
		 * emulate them for its L2 guest, i.e., allows 3-level nesting!
		 */
		return true;
	case EXIT_REASON_CR_ACCESS:
		return nested_vmx_exit_handled_cr(vcpu, vmcs12);
	case EXIT_REASON_DR_ACCESS:
		return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
	case EXIT_REASON_IO_INSTRUCTION:
		return nested_vmx_exit_handled_io(vcpu, vmcs12);
	case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
	case EXIT_REASON_MSR_READ:
	case EXIT_REASON_MSR_WRITE:
		return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
	case EXIT_REASON_INVALID_STATE:
		return true;
	case EXIT_REASON_MWAIT_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
	case EXIT_REASON_MONITOR_TRAP_FLAG:
		return nested_vmx_exit_handled_mtf(vmcs12);
	case EXIT_REASON_MONITOR_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
	case EXIT_REASON_PAUSE_INSTRUCTION:
		return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
			nested_cpu_has2(vmcs12,
				SECONDARY_EXEC_PAUSE_LOOP_EXITING);
	case EXIT_REASON_MCE_DURING_VMENTRY:
		return false;
	case EXIT_REASON_TPR_BELOW_THRESHOLD:
		return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
	case EXIT_REASON_APIC_ACCESS:
	case EXIT_REASON_APIC_WRITE:
	case EXIT_REASON_EOI_INDUCED:
		/*
		 * The controls for "virtualize APIC accesses," "APIC-
		 * register virtualization," and "virtual-interrupt
		 * delivery" only come from vmcs12.
		 */
		return true;
	case EXIT_REASON_EPT_VIOLATION:
		/*
		 * L0 always deals with the EPT violation. If nested EPT is
		 * used, and the nested mmu code discovers that the address is
		 * missing in the guest EPT table (EPT12), the EPT violation
		 * will be injected with nested_ept_inject_page_fault().
		 */
		return false;
	case EXIT_REASON_EPT_MISCONFIG:
		/*
		 * L2 never uses L1's EPT directly, but rather L0's own EPT
		 * table (shadow on EPT) or a merged EPT table that L0 built
		 * (EPT on EPT). So any problems with the structure of the
		 * table are L0's fault.
		 */
		return false;
	case EXIT_REASON_INVPCID:
		return
			nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
			nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
	case EXIT_REASON_WBINVD:
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
	case EXIT_REASON_XSETBV:
		return true;
	case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
		/*
		 * This should never happen, since it is not possible to
		 * set XSS to a non-zero value---neither in L1 nor in L2.
		 * If it were, XSS would have to be checked against
		 * the XSS exit bitmap in vmcs12.
		 */
		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
	case EXIT_REASON_PREEMPTION_TIMER:
		return false;
	case EXIT_REASON_PML_FULL:
		/* We emulate PML support to L1. */
		return false;
	case EXIT_REASON_VMFUNC:
		/* VM functions are emulated through L2->L0 vmexits. */
		return false;
	case EXIT_REASON_ENCLS:
		/* SGX is never exposed to L1 */
		return false;
	case EXIT_REASON_UMWAIT:
	case EXIT_REASON_TPAUSE:
		return nested_cpu_has2(vmcs12,
			SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
	default:
		return true;
	}
}
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
{
	struct vcpu_vmx *vmx;
	struct vmcs12 *vmcs12;
	struct kvm_nested_state kvm_state = {
		.flags = 0,
		.format = KVM_STATE_NESTED_FORMAT_VMX,
		.size = sizeof(kvm_state),
		.hdr.vmx.vmxon_pa = -1ull,
		.hdr.vmx.vmcs12_pa = -1ull,
	};
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];

	if (!vcpu)
		return kvm_state.size + sizeof(*user_vmx_nested_state);

	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);

	if (nested_vmx_allowed(vcpu) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;

		if (vmx_has_valid_vmcs12(vcpu)) {
			kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);

			if (vmx->nested.hv_evmcs)
				kvm_state.flags |= KVM_STATE_NESTED_EVMCS;

			if (is_guest_mode(vcpu) &&
			    nested_cpu_has_shadow_vmcs(vmcs12) &&
			    vmcs12->vmcs_link_pointer != -1ull)
				kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
		}

		if (vmx->nested.smm.vmxon)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;

		if (vmx->nested.smm.guest_mode)
			kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;

		if (is_guest_mode(vcpu)) {
			kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

			if (vmx->nested.nested_run_pending)
				kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;

			if (vmx->nested.mtf_pending)
				kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
		}
	}

	if (user_data_size < kvm_state.size)
		goto out;

	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;

	if (!vmx_has_valid_vmcs12(vcpu))
		goto out;

	/*
	 * When running L2, the authoritative vmcs12 state is in the
	 * vmcs02. When running L1, the authoritative vmcs12 state is
	 * in the shadow or enlightened vmcs linked to vmcs01, unless
	 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
	 * vmcs12 state is in the vmcs12 already.
	 */
	if (is_guest_mode(vcpu)) {
		sync_vmcs02_to_vmcs12(vcpu, vmcs12);
		sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
	} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
		if (vmx->nested.hv_evmcs)
			copy_enlightened_to_vmcs12(vmx);
		else if (enable_shadow_vmcs)
			copy_shadow_to_vmcs12(vmx);
	}

	BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
	BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);

	/*
	 * Copy over the full allocated size of vmcs12 rather than just the size
	 * of the struct.
	 */
	if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
		return -EFAULT;

	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
				 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
			return -EFAULT;
	}

out:
	return kvm_state.size;
}
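
/*
 * Illustrative layout of the blob produced above, assuming a vCPU that is in
 * VMX operation with a current vmcs12 and no shadow vmcs12: a struct
 * kvm_nested_state header immediately followed by data.vmx[0].vmcs12, i.e.
 * kvm_state.size = sizeof(kvm_state) + sizeof(user_vmx_nested_state->vmcs12).
 * When L2 runs with VMCS shadowing and a valid vmcs_link_pointer, the
 * shadow_vmcs12 area follows as well, so userspace must supply a buffer of
 * at least kvm_state.size bytes to receive everything.
 */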
/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.nested_run_pending = 0;
		nested_vmx_vmexit(vcpu, -1, 0, 0);
	}
	free_nested(vcpu);
}
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				struct kvm_nested_state *kvm_state)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs12 *vmcs12;
	u32 exit_qual;
	struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
		&user_kvm_nested_state->data.vmx[0];
	int ret;

	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
		return -EINVAL;

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
		if (kvm_state->hdr.vmx.smm.flags)
			return -EINVAL;

		if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
			return -EINVAL;

		/*
		 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
		 * enable eVMCS capability on vCPU. However, since then
		 * code was changed such that the flag signals that vmcs12
		 * should be copied into the eVMCS in guest memory.
		 *
		 * To preserve backwards compatibility, allow userspace
		 * to set this flag even when there is no VMXON region.
		 */
		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
			return -EINVAL;
	} else {
		if (!nested_vmx_allowed(vcpu))
			return -EINVAL;

		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
			return -EINVAL;
	}

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return -EINVAL;

	if (kvm_state->hdr.vmx.smm.flags &
	    ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	/*
	 * SMM temporarily disables VMX, so we cannot be in guest mode,
	 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
	 * must be zero.
	 */
	if (is_smm(vcpu) ?
		(kvm_state->flags &
		 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
		: kvm_state->hdr.vmx.smm.flags)
		return -EINVAL;

	if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
	    !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
		return -EINVAL;

	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
	    (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
		return -EINVAL;

	vmx_leave_nested(vcpu);

	if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
		return 0;

	vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
	ret = enter_vmx_operation(vcpu);
	if (ret)
		return ret;

	/* Empty 'VMXON' state is permitted */
	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
		return 0;

	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
		    !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
			return -EINVAL;

		set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
		/*
		 * nested_vmx_handle_enlightened_vmptrld() cannot be called
		 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
		 * restored yet. EVMCS will be mapped from
		 * nested_get_vmcs12_pages().
		 */
		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
	} else {
		return 0;
	}

	if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
		vmx->nested.vmxon = false;

		if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
			vmx->nested.smm.guest_mode = true;
	}

	vmcs12 = get_vmcs12(vcpu);
	if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
		return -EFAULT;

	if (vmcs12->hdr.revision_id != VMCS12_REVISION)
		return -EINVAL;

	if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
		return 0;

	vmx->nested.nested_run_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);

	vmx->nested.mtf_pending =
		!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);

	ret = -EINVAL;
	if (nested_cpu_has_shadow_vmcs(vmcs12) &&
	    vmcs12->vmcs_link_pointer != -1ull) {
		struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);

		if (kvm_state->size <
		    sizeof(*kvm_state) +
		    sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
			goto error_guest_mode;

		if (copy_from_user(shadow_vmcs12,
				   user_vmx_nested_state->shadow_vmcs12,
				   sizeof(*shadow_vmcs12))) {
			ret = -EFAULT;
			goto error_guest_mode;
		}

		if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
		    !shadow_vmcs12->hdr.shadow_vmcs)
			goto error_guest_mode;
	}

	if (nested_vmx_check_controls(vcpu, vmcs12) ||
	    nested_vmx_check_host_state(vcpu, vmcs12) ||
	    nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
		goto error_guest_mode;

	vmx->nested.dirty_vmcs12 = true;
	ret = nested_vmx_enter_non_root_mode(vcpu, false);
	if (ret)
		goto error_guest_mode;

	return 0;

error_guest_mode:
	vmx->nested.nested_run_pending = 0;
	return ret;
}
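
/*
 * Rough userspace-side sketch (illustrative only, not taken from any
 * particular VMM): live migration saves nested state on the source with
 * KVM_GET_NESTED_STATE and replays it on the destination with
 * KVM_SET_NESTED_STATE, after CPUID and the VMX capability MSRs have been
 * restored, since the checks above are evaluated against those values.
 * "size" here would come from KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE):
 *
 *	struct kvm_nested_state *state = calloc(1, size);
 *	state->size = size;
 *	ioctl(src_vcpu_fd, KVM_GET_NESTED_STATE, state);
 *	ioctl(dst_vcpu_fd, KVM_SET_NESTED_STATE, state);
 */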
void nested_vmx_set_vmcs_shadowing_bitmap(void)
{
	if (enable_shadow_vmcs) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}
}
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs12, will turn these bits off - and
	 * nested_vmx_exit_reflected() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
		msrs->pinbased_ctls_low,
		msrs->pinbased_ctls_high);
	msrs->pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS |
		(enable_apicv ? PIN_BASED_POSTED_INTR : 0);
	msrs->pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
		msrs->exit_ctls_low,
		msrs->exit_ctls_high);
	msrs->exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	msrs->exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
		msrs->entry_ctls_low,
		msrs->entry_ctls_high);
	msrs->entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
		msrs->procbased_ctls_low,
		msrs->procbased_ctls_high);
	msrs->procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	msrs->procbased_ctls_high &=
		CPU_BASED_INTR_WINDOW_EXITING |
		CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	msrs->procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	msrs->procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/*
	 * secondary cpu-based controls. Do not include those that
	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
	 */
	if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      msrs->secondary_ctls_low,
		      msrs->secondary_ctls_high);

	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_RDTSCP |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_WBINVD_EXITING |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_RDRAND_EXITING |
		SECONDARY_EXEC_ENABLE_INVPCID |
		SECONDARY_EXEC_RDSEED_EXITING |
		SECONDARY_EXEC_XSAVES;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.
	 */
	msrs->secondary_ctls_high |=
		SECONDARY_EXEC_SHADOW_VMCS;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		msrs->ept_caps =
			VMX_EPT_PAGE_WALK_4_BIT |
			VMX_EPT_PAGE_WALK_5_BIT |
			VMX_EPTP_WB_BIT |
			VMX_EPT_INVEPT_BIT |
			VMX_EPT_EXECUTE_ONLY_BIT;

		msrs->ept_caps &= ept_caps;
		msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			msrs->secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			msrs->ept_caps |= VMX_EPT_AD_BIT;
		}
	}

	if (cpu_has_vmx_vmfunc()) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it.
		 */
		if (enable_ept)
			msrs->vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context. The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
		msrs->misc_low,
		msrs->misc_high);
	msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
	msrs->misc_low |=
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	msrs->misc_high = 0;

	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	msrs->basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		msrs->basic |= VMX_BASIC_INOUT;

	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
	msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
	msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);

	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
	msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
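
/*
 * Worked example of the low/high convention described above (illustrative
 * numbers only): if pinbased_ctls_low were 0x16 and pinbased_ctls_high 0xff,
 * a pin-based control value "ctl" supplied by L1 in vmcs12 would be accepted
 * only when (ctl & 0x16) == 0x16 (every must-be-one bit is set) and
 * (ctl & ~0xff) == 0 (no bit outside the may-be-one mask is set); this is
 * the check vmx_control_verify() applies during nested VM-entry.
 */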
void nested_vmx_hardware_unsetup(void)
{
	int i;

	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++)
			free_page((unsigned long)vmx_bitmap[i]);
	}
}
__init int nested_vmx_hardware_setup(struct kvm_x86_ops *ops,
				     int (*exit_handlers[])(struct kvm_vcpu *))
{
	int i;

	if (!cpu_has_vmx_shadow_vmcs())
		enable_shadow_vmcs = 0;
	if (enable_shadow_vmcs) {
		for (i = 0; i < VMX_BITMAP_NR; i++) {
			/*
			 * The vmx_bitmap is not tied to a VM and so should
			 * not be charged to a memcg.
			 */
			vmx_bitmap[i] = (unsigned long *)
				__get_free_page(GFP_KERNEL);
			if (!vmx_bitmap[i]) {
				nested_vmx_hardware_unsetup();
				return -ENOMEM;
			}
		}

		init_vmcs_shadow_fields();
	}

	exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
	exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
	exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
	exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
	exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
	exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
	exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
	exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
	exit_handlers[EXIT_REASON_VMON] = handle_vmon;
	exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;

	ops->check_nested_events = vmx_check_nested_events;
	ops->get_nested_state = vmx_get_nested_state;
	ops->set_nested_state = vmx_set_nested_state;
	ops->get_vmcs12_pages = nested_get_vmcs12_pages;
	ops->nested_enable_evmcs = nested_enable_evmcs;
	ops->nested_get_evmcs_version = nested_get_evmcs_version;