// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/objtool.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>
#include <linux/entry-kvm.h>

#include <asm/cpu_device_id.h>
#include <asm/debugreg.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/idtentry.h>
#include <asm/irq_remapping.h>
#include <asm/reboot.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>

#include "capabilities.h"
#include "kvm_onhyperv.h"
#include "kvm_cache_regs.h"
#include "vmx_onhyperv.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, 0444);

bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, 0444);

bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, 0444);

bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, 0444);

bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, 0444);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, 0444);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, 0444);

module_param(enable_apicv, bool, 0444);

bool __read_mostly enable_ipiv = true;
module_param(enable_ipiv, bool, 0444);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be a hypervisor for its own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, 0444);

bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, 0444);

static bool __read_mostly error_on_inconsistent_vmcs_config = true;
module_param(error_on_inconsistent_vmcs_config, bool, 0444);

static bool __read_mostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);
#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2

#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);

extern bool __read_mostly allow_smaller_maxphyaddr;
module_param(allow_smaller_maxphyaddr, bool, S_IRUGO);

#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON				\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
	RTIT_STATUS_BYTECNT))
/*
 * List of MSRs that can be directly passed to the guest.
 * In addition to these x2apic and PT MSRs are handled specially.
 */
static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
	MSR_IA32_SYSENTER_CS,
	MSR_IA32_SYSENTER_ESP,
	MSR_IA32_SYSENTER_EIP,
	MSR_CORE_C3_RESIDENCY,
	MSR_CORE_C6_RESIDENCY,
	MSR_CORE_C7_RESIDENCY,
};
/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicate if ple enabled.
 *             According to test, this time is usually smaller than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer SDM volume 3b section 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);

/* Default is SYSTEM mode, 1 for host-guest mode */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);

/* Storage for pre module init parameter parsing */
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;

static const struct {
	const char *option;
	bool for_parse;
} vmentry_l1d_param[] = {
	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
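
/*
 * Configure the L1D flush mitigation for L1TF, either at module init or when
 * the "vmentry_l1d_flush" parameter changes: pick the effective flush mode,
 * allocate the flush pages when the CPU lacks the FLUSH_L1D command, and flip
 * the static keys that gate the flush on VM-entry.
 */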
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
	struct page *page;
	unsigned int i;

	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}

	if (!enable_ept) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
		return 0;
	}

	if (host_arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}

	/* If set to auto use the default l1tf mitigation method */
	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
			l1tf = VMENTER_L1D_FLUSH_NEVER;
			break;
		case L1TF_MITIGATION_FLUSH_NOWARN:
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
			l1tf = VMENTER_L1D_FLUSH_COND;
			break;
		case L1TF_MITIGATION_FULL:
		case L1TF_MITIGATION_FULL_FORCE:
			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
			break;
		}
	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
	}

	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		/*
		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
		 * lifetime and so should not be charged to a memcg.
		 */
		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
		if (!page)
			return -ENOMEM;
		vmx_l1d_flush_pages = page_address(page);

		/*
		 * Initialize each page with a different pattern in
		 * order to protect against KSM in the nested
		 * virtualization case.
		 */
		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
			       PAGE_SIZE);
		}
	}

	l1tf_vmx_mitigation = l1tf;

	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
		static_branch_enable(&vmx_l1d_should_flush);
	else
		static_branch_disable(&vmx_l1d_should_flush);

	if (l1tf == VMENTER_L1D_FLUSH_COND)
		static_branch_enable(&vmx_l1d_flush_cond);
	else
		static_branch_disable(&vmx_l1d_flush_cond);
	return 0;
}
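
/* Translate the "vmentry_l1d_flush" parameter string into a flush state index. */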
static int vmentry_l1d_flush_parse(const char *s)
{
	unsigned int i;

	if (s) {
		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
			if (vmentry_l1d_param[i].for_parse &&
			    sysfs_streq(s, vmentry_l1d_param[i].option))
				return i;
		}
	}
	return -EINVAL;
}
static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
	int l1tf, ret;

	l1tf = vmentry_l1d_flush_parse(s);
	if (l1tf < 0)
		return l1tf;

	if (!boot_cpu_has(X86_BUG_L1TF))
		return 0;

	/*
	 * Has vmx_init() run already? If not then this is the pre init
	 * parameter parsing. In that case just store the value and let
	 * vmx_init() do the proper setup after enable_ept has been
	 * established.
	 */
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
		vmentry_l1d_flush_param = l1tf;
		return 0;
	}

	mutex_lock(&vmx_l1d_flush_mutex);
	ret = vmx_setup_l1d_flush(l1tf);
	mutex_unlock(&vmx_l1d_flush_mutex);
	return ret;
}
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
		return sysfs_emit(s, "???\n");

	return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}
static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
{
	u64 msr;

	if (!vmx->disable_fb_clear)
		return;

	msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
	msr |= FB_CLEAR_DIS;
	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
	/* Cache the MSR value to avoid reading it later */
	vmx->msr_ia32_mcu_opt_ctrl = msr;
}
static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
{
	if (!vmx->disable_fb_clear)
		return;

	vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
	native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
}
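
/*
 * Decide whether FB_CLEAR (VERW) behaviour may be disabled for this vCPU:
 * only when the host exposes FB_CLEAR_CTRL and is itself unaffected by
 * MDS/TAA, and the guest either advertises FB_CLEAR or claims immunity to
 * the relevant data-sampling issues via its IA32_ARCH_CAPABILITIES bits.
 */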
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
	vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
				!boot_cpu_has_bug(X86_BUG_MDS) &&
				!boot_cpu_has_bug(X86_BUG_TAA);

	/*
	 * If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
	 * at VMEntry. Skip the MSR read/write when a guest has no use case to
	 * execute VERW.
	 */
	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
	    ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
	     (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
		vmx->disable_fb_clear = false;
}
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
static u32 vmx_segment_access_rights(struct kvm_segment *var);

void vmx_vmexit(void);

#define vmx_insn_failed(fmt...)		\
do {					\
	WARN_ONCE(1, fmt);		\
	pr_warn_ratelimited(fmt);	\
} while (0)

noinline void vmread_error(unsigned long field)
{
	vmx_insn_failed("vmread failed: field=%lx\n", field);
}
#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
noinstr void vmread_error_trampoline2(unsigned long field, bool fault)
{
	if (fault) {
		kvm_spurious_fault();
	} else {
		instrumentation_begin();
		vmread_error(field);
		instrumentation_end();
	}
}
#endif
noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	vmx_insn_failed("vmwrite failed: field=%lx val=%lx err=%u\n",
			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("vmclear failed: %p/%llx err=%u\n",
			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("vmptrld failed: %p/%llx err=%u\n",
			vmcs, phys_addr, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
{
	vmx_insn_failed("invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
			ext, vpid, gva);
}

noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
{
	vmx_insn_failed("invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
			ext, eptp, gpa);
}
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);

/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

struct vmcs_config vmcs_config __ro_after_init;
struct vmx_capability vmx_capability __ro_after_init;
#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}
static unsigned long host_idt_base;

#if IS_ENABLED(CONFIG_HYPERV)
static struct kvm_x86_ops vmx_x86_ops __initdata;

static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
static int hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);

	if (partition_assist_page == INVALID_PAGE)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page = partition_assist_page;
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}
static __init void hv_init_evmcs(void)
{
	int cpu;

	if (!enlightened_vmcs)
		return;

	/*
	 * Enlightened VMCS usage should be recommended and the host needs
	 * to support eVMCS v1 or above.
	 */
	if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
	    (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
	     KVM_EVMCS_VERSION) {

		/* Check that we have assist pages on all online CPUs */
		for_each_online_cpu(cpu) {
			if (!hv_get_vp_assist_page(cpu)) {
				enlightened_vmcs = false;
				break;
			}
		}

		if (enlightened_vmcs) {
			pr_info("Using Hyper-V Enlightened VMCS\n");
			static_branch_enable(&__kvm_is_using_evmcs);
		}

		if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH)
			vmx_x86_ops.enable_l2_tlb_flush
				= hv_enable_l2_tlb_flush;
	} else {
		enlightened_vmcs = false;
	}
}
static void hv_reset_evmcs(void)
{
	struct hv_vp_assist_page *vp_ap;

	if (!kvm_is_using_evmcs())
		return;

	/*
	 * KVM should enable eVMCS if and only if all CPUs have a VP assist
	 * page, and should reject CPU onlining if eVMCS is enabled the CPU
	 * doesn't have a VP assist page allocated.
	 */
	vp_ap = hv_get_vp_assist_page(smp_processor_id());
	if (WARN_ON_ONCE(!vp_ap))
		return;

	/*
	 * Reset everything to support using non-enlightened VMCS access later
	 * (e.g. when we reload the module with enlightened_vmcs=0)
	 */
	vp_ap->nested_control.features.directhypercall = 0;
	vp_ap->current_nested_vmcs = 0;
	vp_ap->enlighten_vmentry = 0;
}

#else /* IS_ENABLED(CONFIG_HYPERV) */
static void hv_init_evmcs(void) {}
static void hv_reset_evmcs(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
609 * Comment's format: document - errata name - stepping - processor name.
611 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
613 static u32 vmx_preemption_cpu_tfms
[] = {
614 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
616 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
617 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
618 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
620 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
622 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
623 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
625 * 320767.pdf - AAP86 - B1 -
626 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
629 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
631 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
633 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
635 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
636 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
637 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
639 /* Xeon E3-1220 V2 */
static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}
static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}
static int possible_passthrough_msr_slot(u32 msr)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
		if (vmx_possible_passthrough_msrs[i] == msr)
			return i;

	return -ENOENT;
}
static bool is_valid_passthrough_msr(u32 msr)
{
	bool r;

	switch (msr) {
	case 0x800 ... 0x8ff:
		/* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
		return true;
	case MSR_IA32_RTIT_STATUS:
	case MSR_IA32_RTIT_OUTPUT_BASE:
	case MSR_IA32_RTIT_OUTPUT_MASK:
	case MSR_IA32_RTIT_CR3_MATCH:
	case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
		/* PT MSRs. These are handled in pt_update_intercept_for_msr() */
	case MSR_LBR_INFO_0 ... MSR_LBR_INFO_0 + 31:
	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 31:
	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 31:
	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
		/* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
		return true;
	}

	r = possible_passthrough_msr_slot(msr) != -ENOENT;

	WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);

	return r;
}
704 struct vmx_uret_msr
*vmx_find_uret_msr(struct vcpu_vmx
*vmx
, u32 msr
)
708 i
= kvm_find_user_return_msr(msr
);
710 return &vmx
->guest_uret_msrs
[i
];
714 static int vmx_set_guest_uret_msr(struct vcpu_vmx
*vmx
,
715 struct vmx_uret_msr
*msr
, u64 data
)
717 unsigned int slot
= msr
- vmx
->guest_uret_msrs
;
720 if (msr
->load_into_hardware
) {
722 ret
= kvm_set_user_return_msr(slot
, data
, msr
->mask
);
/*
 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
 *
 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
 * atomically track post-VMXON state, e.g. this may be called in NMI context.
 * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
 * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
 * magically in RM, VM86, compat mode, or at CPL>0.
 */
static int kvm_cpu_vmxoff(void)
{
	asm goto("1: vmxoff\n\t"
		  _ASM_EXTABLE(1b, %l[fault])
		  ::: "cc", "memory" : fault);

	cr4_clear_bits(X86_CR4_VMXE);
	return 0;

fault:
	cr4_clear_bits(X86_CR4_VMXE);
	return -EIO;
}
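
/*
 * Crash/reboot path: mark KVM as rebooting and, if VMX is still enabled on
 * this CPU, clear every VMCS on its per-CPU list before VMX is turned off so
 * the next kernel does not inherit active VMX state.
 */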
753 static void vmx_emergency_disable(void)
755 int cpu
= raw_smp_processor_id();
756 struct loaded_vmcs
*v
;
758 kvm_rebooting
= true;
761 * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
762 * set in task context. If this races with VMX is disabled by an NMI,
763 * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults due to
766 if (!(__read_cr4() & X86_CR4_VMXE
))
769 list_for_each_entry(v
, &per_cpu(loaded_vmcss_on_cpu
, cpu
),
770 loaded_vmcss_on_cpu_link
)
776 static void __loaded_vmcs_clear(void *arg
)
778 struct loaded_vmcs
*loaded_vmcs
= arg
;
779 int cpu
= raw_smp_processor_id();
781 if (loaded_vmcs
->cpu
!= cpu
)
782 return; /* vcpu migration can race with cpu offline */
783 if (per_cpu(current_vmcs
, cpu
) == loaded_vmcs
->vmcs
)
784 per_cpu(current_vmcs
, cpu
) = NULL
;
786 vmcs_clear(loaded_vmcs
->vmcs
);
787 if (loaded_vmcs
->shadow_vmcs
&& loaded_vmcs
->launched
)
788 vmcs_clear(loaded_vmcs
->shadow_vmcs
);
790 list_del(&loaded_vmcs
->loaded_vmcss_on_cpu_link
);
793 * Ensure all writes to loaded_vmcs, including deleting it from its
794 * current percpu list, complete before setting loaded_vmcs->cpu to
795 * -1, otherwise a different cpu can see loaded_vmcs->cpu == -1 first
796 * and add loaded_vmcs to its percpu list before it's deleted from this
797 * cpu's list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
801 loaded_vmcs
->cpu
= -1;
802 loaded_vmcs
->launched
= 0;
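
/*
 * VMCLEAR a VMCS on the pCPU where it was last loaded, via IPI if that pCPU
 * is not the current one.
 */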
805 void loaded_vmcs_clear(struct loaded_vmcs
*loaded_vmcs
)
807 int cpu
= loaded_vmcs
->cpu
;
810 smp_call_function_single(cpu
,
811 __loaded_vmcs_clear
, loaded_vmcs
, 1);
814 static bool vmx_segment_cache_test_set(struct vcpu_vmx
*vmx
, unsigned seg
,
818 u32 mask
= 1 << (seg
* SEG_FIELD_NR
+ field
);
820 if (!kvm_register_is_available(&vmx
->vcpu
, VCPU_EXREG_SEGMENTS
)) {
821 kvm_register_mark_available(&vmx
->vcpu
, VCPU_EXREG_SEGMENTS
);
822 vmx
->segment_cache
.bitmask
= 0;
824 ret
= vmx
->segment_cache
.bitmask
& mask
;
825 vmx
->segment_cache
.bitmask
|= mask
;
829 static u16
vmx_read_guest_seg_selector(struct vcpu_vmx
*vmx
, unsigned seg
)
831 u16
*p
= &vmx
->segment_cache
.seg
[seg
].selector
;
833 if (!vmx_segment_cache_test_set(vmx
, seg
, SEG_FIELD_SEL
))
834 *p
= vmcs_read16(kvm_vmx_segment_fields
[seg
].selector
);
838 static ulong
vmx_read_guest_seg_base(struct vcpu_vmx
*vmx
, unsigned seg
)
840 ulong
*p
= &vmx
->segment_cache
.seg
[seg
].base
;
842 if (!vmx_segment_cache_test_set(vmx
, seg
, SEG_FIELD_BASE
))
843 *p
= vmcs_readl(kvm_vmx_segment_fields
[seg
].base
);
847 static u32
vmx_read_guest_seg_limit(struct vcpu_vmx
*vmx
, unsigned seg
)
849 u32
*p
= &vmx
->segment_cache
.seg
[seg
].limit
;
851 if (!vmx_segment_cache_test_set(vmx
, seg
, SEG_FIELD_LIMIT
))
852 *p
= vmcs_read32(kvm_vmx_segment_fields
[seg
].limit
);
856 static u32
vmx_read_guest_seg_ar(struct vcpu_vmx
*vmx
, unsigned seg
)
858 u32
*p
= &vmx
->segment_cache
.seg
[seg
].ar
;
860 if (!vmx_segment_cache_test_set(vmx
, seg
, SEG_FIELD_AR
))
861 *p
= vmcs_read32(kvm_vmx_segment_fields
[seg
].ar_bytes
);
865 void vmx_update_exception_bitmap(struct kvm_vcpu
*vcpu
)
869 eb
= (1u << PF_VECTOR
) | (1u << UD_VECTOR
) | (1u << MC_VECTOR
) |
870 (1u << DB_VECTOR
) | (1u << AC_VECTOR
);
872 * Guest access to VMware backdoor ports could legitimately
873 * trigger #GP because of TSS I/O permission bitmap.
874 * We intercept those #GP and allow access to them anyway
877 if (enable_vmware_backdoor
)
878 eb
|= (1u << GP_VECTOR
);
879 if ((vcpu
->guest_debug
&
880 (KVM_GUESTDBG_ENABLE
| KVM_GUESTDBG_USE_SW_BP
)) ==
881 (KVM_GUESTDBG_ENABLE
| KVM_GUESTDBG_USE_SW_BP
))
882 eb
|= 1u << BP_VECTOR
;
883 if (to_vmx(vcpu
)->rmode
.vm86_active
)
885 if (!vmx_need_pf_intercept(vcpu
))
886 eb
&= ~(1u << PF_VECTOR
);
888 /* When we are running a nested L2 guest and L1 specified for it a
889 * certain exception bitmap, we must trap the same exceptions and pass
890 * them to L1. When running L2, we will only handle the exceptions
891 * specified above if L1 did not want them.
893 if (is_guest_mode(vcpu
))
894 eb
|= get_vmcs12(vcpu
)->exception_bitmap
;
896 int mask
= 0, match
= 0;
898 if (enable_ept
&& (eb
& (1u << PF_VECTOR
))) {
900 * If EPT is enabled, #PF is currently only intercepted
901 * if MAXPHYADDR is smaller on the guest than on the
902 * host. In that case we only care about present,
903 * non-reserved faults. For vmcs02, however, PFEC_MASK
904 * and PFEC_MATCH are set in prepare_vmcs02_rare.
906 mask
= PFERR_PRESENT_MASK
| PFERR_RSVD_MASK
;
907 match
= PFERR_PRESENT_MASK
;
909 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK
, mask
);
910 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH
, match
);
914 * Disabling xfd interception indicates that dynamic xfeatures
915 * might be used in the guest. Always trap #NM in this case
916 * to save guest xfd_err timely.
918 if (vcpu
->arch
.xfd_no_write_intercept
)
919 eb
|= (1u << NM_VECTOR
);
921 vmcs_write32(EXCEPTION_BITMAP
, eb
);
925 * Check if MSR is intercepted for currently loaded MSR bitmap.
927 static bool msr_write_intercepted(struct vcpu_vmx
*vmx
, u32 msr
)
929 if (!(exec_controls_get(vmx
) & CPU_BASED_USE_MSR_BITMAPS
))
932 return vmx_test_msr_bitmap_write(vmx
->loaded_vmcs
->msr_bitmap
, msr
);
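
/*
 * Compute the flags passed to the low-level VM-entry code: whether to
 * VMRESUME (vs. VMLAUNCH) and whether SPEC_CTRL must be saved after VM-exit
 * because the guest can write it without being intercepted.
 */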
935 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx
*vmx
)
937 unsigned int flags
= 0;
939 if (vmx
->loaded_vmcs
->launched
)
940 flags
|= VMX_RUN_VMRESUME
;
943 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
944 * to change it directly without causing a vmexit. In that case read
945 * it after vmexit and store it in vmx->spec_ctrl.
947 if (!msr_write_intercepted(vmx
, MSR_IA32_SPEC_CTRL
))
948 flags
|= VMX_RUN_SAVE_SPEC_CTRL
;
953 static __always_inline
void clear_atomic_switch_msr_special(struct vcpu_vmx
*vmx
,
954 unsigned long entry
, unsigned long exit
)
956 vm_entry_controls_clearbit(vmx
, entry
);
957 vm_exit_controls_clearbit(vmx
, exit
);
960 int vmx_find_loadstore_msr_slot(struct vmx_msrs
*m
, u32 msr
)
964 for (i
= 0; i
< m
->nr
; ++i
) {
965 if (m
->val
[i
].index
== msr
)
971 static void clear_atomic_switch_msr(struct vcpu_vmx
*vmx
, unsigned msr
)
974 struct msr_autoload
*m
= &vmx
->msr_autoload
;
978 if (cpu_has_load_ia32_efer()) {
979 clear_atomic_switch_msr_special(vmx
,
980 VM_ENTRY_LOAD_IA32_EFER
,
981 VM_EXIT_LOAD_IA32_EFER
);
985 case MSR_CORE_PERF_GLOBAL_CTRL
:
986 if (cpu_has_load_perf_global_ctrl()) {
987 clear_atomic_switch_msr_special(vmx
,
988 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
,
989 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL
);
994 i
= vmx_find_loadstore_msr_slot(&m
->guest
, msr
);
998 m
->guest
.val
[i
] = m
->guest
.val
[m
->guest
.nr
];
999 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT
, m
->guest
.nr
);
1002 i
= vmx_find_loadstore_msr_slot(&m
->host
, msr
);
1007 m
->host
.val
[i
] = m
->host
.val
[m
->host
.nr
];
1008 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT
, m
->host
.nr
);
1011 static __always_inline
void add_atomic_switch_msr_special(struct vcpu_vmx
*vmx
,
1012 unsigned long entry
, unsigned long exit
,
1013 unsigned long guest_val_vmcs
, unsigned long host_val_vmcs
,
1014 u64 guest_val
, u64 host_val
)
1016 vmcs_write64(guest_val_vmcs
, guest_val
);
1017 if (host_val_vmcs
!= HOST_IA32_EFER
)
1018 vmcs_write64(host_val_vmcs
, host_val
);
1019 vm_entry_controls_setbit(vmx
, entry
);
1020 vm_exit_controls_setbit(vmx
, exit
);
1023 static void add_atomic_switch_msr(struct vcpu_vmx
*vmx
, unsigned msr
,
1024 u64 guest_val
, u64 host_val
, bool entry_only
)
1027 struct msr_autoload
*m
= &vmx
->msr_autoload
;
1031 if (cpu_has_load_ia32_efer()) {
1032 add_atomic_switch_msr_special(vmx
,
1033 VM_ENTRY_LOAD_IA32_EFER
,
1034 VM_EXIT_LOAD_IA32_EFER
,
1037 guest_val
, host_val
);
1041 case MSR_CORE_PERF_GLOBAL_CTRL
:
1042 if (cpu_has_load_perf_global_ctrl()) {
1043 add_atomic_switch_msr_special(vmx
,
1044 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
,
1045 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL
,
1046 GUEST_IA32_PERF_GLOBAL_CTRL
,
1047 HOST_IA32_PERF_GLOBAL_CTRL
,
1048 guest_val
, host_val
);
1052 case MSR_IA32_PEBS_ENABLE
:
1053 /* PEBS needs a quiescent period after being disabled (to write
1054 * a record). Disabling PEBS through VMX MSR swapping doesn't
1055 * provide that period, so a CPU could write host's record into
1058 wrmsrl(MSR_IA32_PEBS_ENABLE
, 0);
1061 i
= vmx_find_loadstore_msr_slot(&m
->guest
, msr
);
1063 j
= vmx_find_loadstore_msr_slot(&m
->host
, msr
);
1065 if ((i
< 0 && m
->guest
.nr
== MAX_NR_LOADSTORE_MSRS
) ||
1066 (j
< 0 && m
->host
.nr
== MAX_NR_LOADSTORE_MSRS
)) {
1067 printk_once(KERN_WARNING
"Not enough msr switch entries. "
1068 "Can't add msr %x\n", msr
);
1073 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT
, m
->guest
.nr
);
1075 m
->guest
.val
[i
].index
= msr
;
1076 m
->guest
.val
[i
].value
= guest_val
;
1083 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT
, m
->host
.nr
);
1085 m
->host
.val
[j
].index
= msr
;
1086 m
->host
.val
[j
].value
= host_val
;
1089 static bool update_transition_efer(struct vcpu_vmx
*vmx
)
1091 u64 guest_efer
= vmx
->vcpu
.arch
.efer
;
1092 u64 ignore_bits
= 0;
1095 /* Shadow paging assumes NX to be available. */
1097 guest_efer
|= EFER_NX
;
1100 * LMA and LME handled by hardware; SCE meaningless outside long mode.
1102 ignore_bits
|= EFER_SCE
;
1103 #ifdef CONFIG_X86_64
1104 ignore_bits
|= EFER_LMA
| EFER_LME
;
1105 /* SCE is meaningful only in long mode on Intel */
1106 if (guest_efer
& EFER_LMA
)
1107 ignore_bits
&= ~(u64
)EFER_SCE
;
1111 * On EPT, we can't emulate NX, so we must switch EFER atomically.
1112 * On CPUs that support "load IA32_EFER", always switch EFER
1113 * atomically, since it's faster than switching it manually.
1115 if (cpu_has_load_ia32_efer() ||
1116 (enable_ept
&& ((vmx
->vcpu
.arch
.efer
^ host_efer
) & EFER_NX
))) {
1117 if (!(guest_efer
& EFER_LMA
))
1118 guest_efer
&= ~EFER_LME
;
1119 if (guest_efer
!= host_efer
)
1120 add_atomic_switch_msr(vmx
, MSR_EFER
,
1121 guest_efer
, host_efer
, false);
1123 clear_atomic_switch_msr(vmx
, MSR_EFER
);
1127 i
= kvm_find_user_return_msr(MSR_EFER
);
1131 clear_atomic_switch_msr(vmx
, MSR_EFER
);
1133 guest_efer
&= ~ignore_bits
;
1134 guest_efer
|= host_efer
& ignore_bits
;
1136 vmx
->guest_uret_msrs
[i
].data
= guest_efer
;
1137 vmx
->guest_uret_msrs
[i
].mask
= ~ignore_bits
;
1142 #ifdef CONFIG_X86_32
1144 * On 32-bit kernels, VM exits still load the FS and GS bases from the
1145 * VMCS rather than the segment table. KVM uses this helper to figure
1146 * out the current bases to poke them into the VMCS before entry.
1148 static unsigned long segment_base(u16 selector
)
1150 struct desc_struct
*table
;
1153 if (!(selector
& ~SEGMENT_RPL_MASK
))
1156 table
= get_current_gdt_ro();
1158 if ((selector
& SEGMENT_TI_MASK
) == SEGMENT_LDT
) {
1159 u16 ldt_selector
= kvm_read_ldt();
1161 if (!(ldt_selector
& ~SEGMENT_RPL_MASK
))
1164 table
= (struct desc_struct
*)segment_base(ldt_selector
);
1166 v
= get_desc_base(&table
[selector
>> 3]);
1171 static inline bool pt_can_write_msr(struct vcpu_vmx
*vmx
)
1173 return vmx_pt_mode_is_host_guest() &&
1174 !(vmx
->pt_desc
.guest
.ctl
& RTIT_CTL_TRACEEN
);
1177 static inline bool pt_output_base_valid(struct kvm_vcpu
*vcpu
, u64 base
)
1179 /* The base must be 128-byte aligned and a legal physical address. */
1180 return kvm_vcpu_is_legal_aligned_gpa(vcpu
, base
, 128);
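
/*
 * Load a saved Processor Trace register context (status, output base/mask,
 * CR3 match and address ranges) into the PT MSRs.
 */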
1183 static inline void pt_load_msr(struct pt_ctx
*ctx
, u32 addr_range
)
1187 wrmsrl(MSR_IA32_RTIT_STATUS
, ctx
->status
);
1188 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE
, ctx
->output_base
);
1189 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK
, ctx
->output_mask
);
1190 wrmsrl(MSR_IA32_RTIT_CR3_MATCH
, ctx
->cr3_match
);
1191 for (i
= 0; i
< addr_range
; i
++) {
1192 wrmsrl(MSR_IA32_RTIT_ADDR0_A
+ i
* 2, ctx
->addr_a
[i
]);
1193 wrmsrl(MSR_IA32_RTIT_ADDR0_B
+ i
* 2, ctx
->addr_b
[i
]);
1197 static inline void pt_save_msr(struct pt_ctx
*ctx
, u32 addr_range
)
1201 rdmsrl(MSR_IA32_RTIT_STATUS
, ctx
->status
);
1202 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE
, ctx
->output_base
);
1203 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK
, ctx
->output_mask
);
1204 rdmsrl(MSR_IA32_RTIT_CR3_MATCH
, ctx
->cr3_match
);
1205 for (i
= 0; i
< addr_range
; i
++) {
1206 rdmsrl(MSR_IA32_RTIT_ADDR0_A
+ i
* 2, ctx
->addr_a
[i
]);
1207 rdmsrl(MSR_IA32_RTIT_ADDR0_B
+ i
* 2, ctx
->addr_b
[i
]);
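
/*
 * Before VM-entry with PT in host/guest mode: stash the host's RTIT_CTL and,
 * if the guest has tracing enabled, stop host tracing and swap in the guest
 * PT MSR context.
 */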
1211 static void pt_guest_enter(struct vcpu_vmx
*vmx
)
1213 if (vmx_pt_mode_is_system())
1217 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
1218 * Save host state before VM entry.
1220 rdmsrl(MSR_IA32_RTIT_CTL
, vmx
->pt_desc
.host
.ctl
);
1221 if (vmx
->pt_desc
.guest
.ctl
& RTIT_CTL_TRACEEN
) {
1222 wrmsrl(MSR_IA32_RTIT_CTL
, 0);
1223 pt_save_msr(&vmx
->pt_desc
.host
, vmx
->pt_desc
.num_address_ranges
);
1224 pt_load_msr(&vmx
->pt_desc
.guest
, vmx
->pt_desc
.num_address_ranges
);
1228 static void pt_guest_exit(struct vcpu_vmx
*vmx
)
1230 if (vmx_pt_mode_is_system())
1233 if (vmx
->pt_desc
.guest
.ctl
& RTIT_CTL_TRACEEN
) {
1234 pt_save_msr(&vmx
->pt_desc
.guest
, vmx
->pt_desc
.num_address_ranges
);
1235 pt_load_msr(&vmx
->pt_desc
.host
, vmx
->pt_desc
.num_address_ranges
);
1239 * KVM requires VM_EXIT_CLEAR_IA32_RTIT_CTL to expose PT to the guest,
1240 * i.e. RTIT_CTL is always cleared on VM-Exit. Restore it if necessary.
1242 if (vmx
->pt_desc
.host
.ctl
)
1243 wrmsrl(MSR_IA32_RTIT_CTL
, vmx
->pt_desc
.host
.ctl
);
1246 void vmx_set_host_fs_gs(struct vmcs_host_state
*host
, u16 fs_sel
, u16 gs_sel
,
1247 unsigned long fs_base
, unsigned long gs_base
)
1249 if (unlikely(fs_sel
!= host
->fs_sel
)) {
1251 vmcs_write16(HOST_FS_SELECTOR
, fs_sel
);
1253 vmcs_write16(HOST_FS_SELECTOR
, 0);
1254 host
->fs_sel
= fs_sel
;
1256 if (unlikely(gs_sel
!= host
->gs_sel
)) {
1258 vmcs_write16(HOST_GS_SELECTOR
, gs_sel
);
1260 vmcs_write16(HOST_GS_SELECTOR
, 0);
1261 host
->gs_sel
= gs_sel
;
1263 if (unlikely(fs_base
!= host
->fs_base
)) {
1264 vmcs_writel(HOST_FS_BASE
, fs_base
);
1265 host
->fs_base
= fs_base
;
1267 if (unlikely(gs_base
!= host
->gs_base
)) {
1268 vmcs_writel(HOST_GS_BASE
, gs_base
);
1269 host
->gs_base
= gs_base
;
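
/*
 * Save the host's segment and MSR state and program the corresponding VMCS
 * host-state fields before entering the guest, and load any user-return MSRs
 * that must hold guest values while the guest runs.
 */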
1273 void vmx_prepare_switch_to_guest(struct kvm_vcpu
*vcpu
)
1275 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1276 struct vmcs_host_state
*host_state
;
1277 #ifdef CONFIG_X86_64
1278 int cpu
= raw_smp_processor_id();
1280 unsigned long fs_base
, gs_base
;
1284 vmx
->req_immediate_exit
= false;
1287 * Note that guest MSRs to be saved/restored can also be changed
1288 * when guest state is loaded. This happens when guest transitions
1289 * to/from long-mode by setting MSR_EFER.LMA.
1291 if (!vmx
->guest_uret_msrs_loaded
) {
1292 vmx
->guest_uret_msrs_loaded
= true;
1293 for (i
= 0; i
< kvm_nr_uret_msrs
; ++i
) {
1294 if (!vmx
->guest_uret_msrs
[i
].load_into_hardware
)
1297 kvm_set_user_return_msr(i
,
1298 vmx
->guest_uret_msrs
[i
].data
,
1299 vmx
->guest_uret_msrs
[i
].mask
);
1303 if (vmx
->nested
.need_vmcs12_to_shadow_sync
)
1304 nested_sync_vmcs12_to_shadow(vcpu
);
1306 if (vmx
->guest_state_loaded
)
1309 host_state
= &vmx
->loaded_vmcs
->host_state
;
1312 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
1313 * allow segment selectors with cpl > 0 or ti == 1.
1315 host_state
->ldt_sel
= kvm_read_ldt();
1317 #ifdef CONFIG_X86_64
1318 savesegment(ds
, host_state
->ds_sel
);
1319 savesegment(es
, host_state
->es_sel
);
1321 gs_base
= cpu_kernelmode_gs_base(cpu
);
1322 if (likely(is_64bit_mm(current
->mm
))) {
1323 current_save_fsgs();
1324 fs_sel
= current
->thread
.fsindex
;
1325 gs_sel
= current
->thread
.gsindex
;
1326 fs_base
= current
->thread
.fsbase
;
1327 vmx
->msr_host_kernel_gs_base
= current
->thread
.gsbase
;
1329 savesegment(fs
, fs_sel
);
1330 savesegment(gs
, gs_sel
);
1331 fs_base
= read_msr(MSR_FS_BASE
);
1332 vmx
->msr_host_kernel_gs_base
= read_msr(MSR_KERNEL_GS_BASE
);
1335 wrmsrl(MSR_KERNEL_GS_BASE
, vmx
->msr_guest_kernel_gs_base
);
1337 savesegment(fs
, fs_sel
);
1338 savesegment(gs
, gs_sel
);
1339 fs_base
= segment_base(fs_sel
);
1340 gs_base
= segment_base(gs_sel
);
1343 vmx_set_host_fs_gs(host_state
, fs_sel
, gs_sel
, fs_base
, gs_base
);
1344 vmx
->guest_state_loaded
= true;
1347 static void vmx_prepare_switch_to_host(struct vcpu_vmx
*vmx
)
1349 struct vmcs_host_state
*host_state
;
1351 if (!vmx
->guest_state_loaded
)
1354 host_state
= &vmx
->loaded_vmcs
->host_state
;
1356 ++vmx
->vcpu
.stat
.host_state_reload
;
1358 #ifdef CONFIG_X86_64
1359 rdmsrl(MSR_KERNEL_GS_BASE
, vmx
->msr_guest_kernel_gs_base
);
1361 if (host_state
->ldt_sel
|| (host_state
->gs_sel
& 7)) {
1362 kvm_load_ldt(host_state
->ldt_sel
);
1363 #ifdef CONFIG_X86_64
1364 load_gs_index(host_state
->gs_sel
);
1366 loadsegment(gs
, host_state
->gs_sel
);
1369 if (host_state
->fs_sel
& 7)
1370 loadsegment(fs
, host_state
->fs_sel
);
1371 #ifdef CONFIG_X86_64
1372 if (unlikely(host_state
->ds_sel
| host_state
->es_sel
)) {
1373 loadsegment(ds
, host_state
->ds_sel
);
1374 loadsegment(es
, host_state
->es_sel
);
1377 invalidate_tss_limit();
1378 #ifdef CONFIG_X86_64
1379 wrmsrl(MSR_KERNEL_GS_BASE
, vmx
->msr_host_kernel_gs_base
);
1381 load_fixmap_gdt(raw_smp_processor_id());
1382 vmx
->guest_state_loaded
= false;
1383 vmx
->guest_uret_msrs_loaded
= false;
1386 #ifdef CONFIG_X86_64
1387 static u64
vmx_read_guest_kernel_gs_base(struct vcpu_vmx
*vmx
)
1390 if (vmx
->guest_state_loaded
)
1391 rdmsrl(MSR_KERNEL_GS_BASE
, vmx
->msr_guest_kernel_gs_base
);
1393 return vmx
->msr_guest_kernel_gs_base
;
1396 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx
*vmx
, u64 data
)
1399 if (vmx
->guest_state_loaded
)
1400 wrmsrl(MSR_KERNEL_GS_BASE
, data
);
1402 vmx
->msr_guest_kernel_gs_base
= data
;
1406 void vmx_vcpu_load_vmcs(struct kvm_vcpu
*vcpu
, int cpu
,
1407 struct loaded_vmcs
*buddy
)
1409 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1410 bool already_loaded
= vmx
->loaded_vmcs
->cpu
== cpu
;
1413 if (!already_loaded
) {
1414 loaded_vmcs_clear(vmx
->loaded_vmcs
);
1415 local_irq_disable();
1418 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to
1419 * this cpu's percpu list, otherwise it may not yet be deleted
1420 * from its previous cpu's percpu list. Pairs with the
1421 * smb_wmb() in __loaded_vmcs_clear().
1425 list_add(&vmx
->loaded_vmcs
->loaded_vmcss_on_cpu_link
,
1426 &per_cpu(loaded_vmcss_on_cpu
, cpu
));
1430 prev
= per_cpu(current_vmcs
, cpu
);
1431 if (prev
!= vmx
->loaded_vmcs
->vmcs
) {
1432 per_cpu(current_vmcs
, cpu
) = vmx
->loaded_vmcs
->vmcs
;
1433 vmcs_load(vmx
->loaded_vmcs
->vmcs
);
1436 * No indirect branch prediction barrier needed when switching
1437 * the active VMCS within a vCPU, unless IBRS is advertised to
1438 * the vCPU. To minimize the number of IBPBs executed, KVM
1439 * performs IBPB on nested VM-Exit (a single nested transition
1440 * may switch the active VMCS multiple times).
1442 if (!buddy
|| WARN_ON_ONCE(buddy
->vmcs
!= prev
))
1443 indirect_branch_prediction_barrier();
1446 if (!already_loaded
) {
1447 void *gdt
= get_current_gdt_ro();
1450 * Flush all EPTP/VPID contexts, the new pCPU may have stale
1451 * TLB entries from its previous association with the vCPU.
1453 kvm_make_request(KVM_REQ_TLB_FLUSH
, vcpu
);
1456 * Linux uses per-cpu TSS and GDT, so set these when switching
1457 * processors. See 22.2.4.
1459 vmcs_writel(HOST_TR_BASE
,
1460 (unsigned long)&get_cpu_entry_area(cpu
)->tss
.x86_tss
);
1461 vmcs_writel(HOST_GDTR_BASE
, (unsigned long)gdt
); /* 22.2.4 */
1463 if (IS_ENABLED(CONFIG_IA32_EMULATION
) || IS_ENABLED(CONFIG_X86_32
)) {
1465 vmcs_writel(HOST_IA32_SYSENTER_ESP
,
1466 (unsigned long)(cpu_entry_stack(cpu
) + 1));
1469 vmx
->loaded_vmcs
->cpu
= cpu
;
1474 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
1475 * vcpu mutex is already taken.
1477 static void vmx_vcpu_load(struct kvm_vcpu
*vcpu
, int cpu
)
1479 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1481 vmx_vcpu_load_vmcs(vcpu
, cpu
, NULL
);
1483 vmx_vcpu_pi_load(vcpu
, cpu
);
1485 vmx
->host_debugctlmsr
= get_debugctlmsr();
1488 static void vmx_vcpu_put(struct kvm_vcpu
*vcpu
)
1490 vmx_vcpu_pi_put(vcpu
);
1492 vmx_prepare_switch_to_host(to_vmx(vcpu
));
1495 bool vmx_emulation_required(struct kvm_vcpu
*vcpu
)
1497 return emulate_invalid_guest_state
&& !vmx_guest_state_valid(vcpu
);
1500 unsigned long vmx_get_rflags(struct kvm_vcpu
*vcpu
)
1502 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1503 unsigned long rflags
, save_rflags
;
1505 if (!kvm_register_is_available(vcpu
, VCPU_EXREG_RFLAGS
)) {
1506 kvm_register_mark_available(vcpu
, VCPU_EXREG_RFLAGS
);
1507 rflags
= vmcs_readl(GUEST_RFLAGS
);
1508 if (vmx
->rmode
.vm86_active
) {
1509 rflags
&= RMODE_GUEST_OWNED_EFLAGS_BITS
;
1510 save_rflags
= vmx
->rmode
.save_rflags
;
1511 rflags
|= save_rflags
& ~RMODE_GUEST_OWNED_EFLAGS_BITS
;
1513 vmx
->rflags
= rflags
;
1518 void vmx_set_rflags(struct kvm_vcpu
*vcpu
, unsigned long rflags
)
1520 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1521 unsigned long old_rflags
;
1524 * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU
1525 * is an unrestricted guest in order to mark L2 as needing emulation
1526 * if L1 runs L2 as a restricted guest.
1528 if (is_unrestricted_guest(vcpu
)) {
1529 kvm_register_mark_available(vcpu
, VCPU_EXREG_RFLAGS
);
1530 vmx
->rflags
= rflags
;
1531 vmcs_writel(GUEST_RFLAGS
, rflags
);
1535 old_rflags
= vmx_get_rflags(vcpu
);
1536 vmx
->rflags
= rflags
;
1537 if (vmx
->rmode
.vm86_active
) {
1538 vmx
->rmode
.save_rflags
= rflags
;
1539 rflags
|= X86_EFLAGS_IOPL
| X86_EFLAGS_VM
;
1541 vmcs_writel(GUEST_RFLAGS
, rflags
);
1543 if ((old_rflags
^ vmx
->rflags
) & X86_EFLAGS_VM
)
1544 vmx
->emulation_required
= vmx_emulation_required(vcpu
);
1547 static bool vmx_get_if_flag(struct kvm_vcpu
*vcpu
)
1549 return vmx_get_rflags(vcpu
) & X86_EFLAGS_IF
;
1552 u32
vmx_get_interrupt_shadow(struct kvm_vcpu
*vcpu
)
1554 u32 interruptibility
= vmcs_read32(GUEST_INTERRUPTIBILITY_INFO
);
1557 if (interruptibility
& GUEST_INTR_STATE_STI
)
1558 ret
|= KVM_X86_SHADOW_INT_STI
;
1559 if (interruptibility
& GUEST_INTR_STATE_MOV_SS
)
1560 ret
|= KVM_X86_SHADOW_INT_MOV_SS
;
1565 void vmx_set_interrupt_shadow(struct kvm_vcpu
*vcpu
, int mask
)
1567 u32 interruptibility_old
= vmcs_read32(GUEST_INTERRUPTIBILITY_INFO
);
1568 u32 interruptibility
= interruptibility_old
;
1570 interruptibility
&= ~(GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_MOV_SS
);
1572 if (mask
& KVM_X86_SHADOW_INT_MOV_SS
)
1573 interruptibility
|= GUEST_INTR_STATE_MOV_SS
;
1574 else if (mask
& KVM_X86_SHADOW_INT_STI
)
1575 interruptibility
|= GUEST_INTR_STATE_STI
;
1577 if ((interruptibility
!= interruptibility_old
))
1578 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO
, interruptibility
);
1581 static int vmx_rtit_ctl_check(struct kvm_vcpu
*vcpu
, u64 data
)
1583 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1584 unsigned long value
;
1587 * Any MSR write that attempts to change bits marked reserved will
1590 if (data
& vmx
->pt_desc
.ctl_bitmask
)
1594 * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
1595 * result in a #GP unless the same write also clears TraceEn.
1597 if ((vmx
->pt_desc
.guest
.ctl
& RTIT_CTL_TRACEEN
) &&
1598 ((vmx
->pt_desc
.guest
.ctl
^ data
) & ~RTIT_CTL_TRACEEN
))
1602 * WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this bit
1603 * and FabricEn would cause #GP, if
1604 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0
1606 if ((data
& RTIT_CTL_TRACEEN
) && !(data
& RTIT_CTL_TOPA
) &&
1607 !(data
& RTIT_CTL_FABRIC_EN
) &&
1608 !intel_pt_validate_cap(vmx
->pt_desc
.caps
,
1609 PT_CAP_single_range_output
))
1613 * MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that
1614 * utilize encodings marked reserved will cause a #GP fault.
1616 value
= intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_mtc_periods
);
1617 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_mtc
) &&
1618 !test_bit((data
& RTIT_CTL_MTC_RANGE
) >>
1619 RTIT_CTL_MTC_RANGE_OFFSET
, &value
))
1621 value
= intel_pt_validate_cap(vmx
->pt_desc
.caps
,
1622 PT_CAP_cycle_thresholds
);
1623 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_psb_cyc
) &&
1624 !test_bit((data
& RTIT_CTL_CYC_THRESH
) >>
1625 RTIT_CTL_CYC_THRESH_OFFSET
, &value
))
1627 value
= intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_psb_periods
);
1628 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_psb_cyc
) &&
1629 !test_bit((data
& RTIT_CTL_PSB_FREQ
) >>
1630 RTIT_CTL_PSB_FREQ_OFFSET
, &value
))
1634 * If ADDRx_CFG is reserved or the encodings is >2 will
1635 * cause a #GP fault.
1637 value
= (data
& RTIT_CTL_ADDR0
) >> RTIT_CTL_ADDR0_OFFSET
;
1638 if ((value
&& (vmx
->pt_desc
.num_address_ranges
< 1)) || (value
> 2))
1640 value
= (data
& RTIT_CTL_ADDR1
) >> RTIT_CTL_ADDR1_OFFSET
;
1641 if ((value
&& (vmx
->pt_desc
.num_address_ranges
< 2)) || (value
> 2))
1643 value
= (data
& RTIT_CTL_ADDR2
) >> RTIT_CTL_ADDR2_OFFSET
;
1644 if ((value
&& (vmx
->pt_desc
.num_address_ranges
< 3)) || (value
> 2))
1646 value
= (data
& RTIT_CTL_ADDR3
) >> RTIT_CTL_ADDR3_OFFSET
;
1647 if ((value
&& (vmx
->pt_desc
.num_address_ranges
< 4)) || (value
> 2))
1653 static int vmx_check_emulate_instruction(struct kvm_vcpu
*vcpu
, int emul_type
,
1654 void *insn
, int insn_len
)
1657 * Emulation of instructions in SGX enclaves is impossible as RIP does
1658 * not point at the failing instruction, and even if it did, the code
1659 * stream is inaccessible. Inject #UD instead of exiting to userspace
1660 * so that guest userspace can't DoS the guest simply by triggering
1661 * emulation (enclaves are CPL3 only).
1663 if (to_vmx(vcpu
)->exit_reason
.enclave_mode
) {
1664 kvm_queue_exception(vcpu
, UD_VECTOR
);
1665 return X86EMUL_PROPAGATE_FAULT
;
1667 return X86EMUL_CONTINUE
;
1670 static int skip_emulated_instruction(struct kvm_vcpu
*vcpu
)
1672 union vmx_exit_reason exit_reason
= to_vmx(vcpu
)->exit_reason
;
1673 unsigned long rip
, orig_rip
;
1677 * Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
1678 * undefined behavior: Intel's SDM doesn't mandate the VMCS field be
1679 * set when EPT misconfig occurs. In practice, real hardware updates
1680 * VM_EXIT_INSTRUCTION_LEN on EPT misconfig, but other hypervisors
1681 * (namely Hyper-V) don't set it due to it being undefined behavior,
1682 * i.e. we end up advancing IP with some random value.
1684 if (!static_cpu_has(X86_FEATURE_HYPERVISOR
) ||
1685 exit_reason
.basic
!= EXIT_REASON_EPT_MISCONFIG
) {
1686 instr_len
= vmcs_read32(VM_EXIT_INSTRUCTION_LEN
);
1689 * Emulating an enclave's instructions isn't supported as KVM
1690 * cannot access the enclave's memory or its true RIP, e.g. the
1691 * vmcs.GUEST_RIP points at the exit point of the enclave, not
1692 * the RIP that actually triggered the VM-Exit. But, because
1693 * most instructions that cause VM-Exit will #UD in an enclave,
1694 * most instruction-based VM-Exits simply do not occur.
1696 * There are a few exceptions, notably the debug instructions
1697 * INT1ICEBRK and INT3, as they are allowed in debug enclaves
1698 * and generate #DB/#BP as expected, which KVM might intercept.
1699 * But again, the CPU does the dirty work and saves an instr
1700 * length of zero so VMMs don't shoot themselves in the foot.
1701 * WARN if KVM tries to skip a non-zero length instruction on
1702 * a VM-Exit from an enclave.
1707 WARN_ONCE(exit_reason
.enclave_mode
,
1708 "skipping instruction after SGX enclave VM-Exit");
1710 orig_rip
= kvm_rip_read(vcpu
);
1711 rip
= orig_rip
+ instr_len
;
1712 #ifdef CONFIG_X86_64
1714 * We need to mask out the high 32 bits of RIP if not in 64-bit
1715 * mode, but just finding out that we are in 64-bit mode is
1716 * quite expensive. Only do it if there was a carry.
1718 if (unlikely(((rip
^ orig_rip
) >> 31) == 3) && !is_64_bit_mode(vcpu
))
1721 kvm_rip_write(vcpu
, rip
);
1723 if (!kvm_emulate_instruction(vcpu
, EMULTYPE_SKIP
))
1728 /* skipping an emulated instruction also counts */
1729 vmx_set_interrupt_shadow(vcpu
, 0);
1735 * Recognizes a pending MTF VM-exit and records the nested state for later
1738 static void vmx_update_emulated_instruction(struct kvm_vcpu
*vcpu
)
1740 struct vmcs12
*vmcs12
= get_vmcs12(vcpu
);
1741 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1743 if (!is_guest_mode(vcpu
))
1747 * Per the SDM, MTF takes priority over debug-trap exceptions besides
1748 * TSS T-bit traps and ICEBP (INT1). KVM doesn't emulate T-bit traps
1749 * or ICEBP (in the emulator proper), and skipping of ICEBP after an
1750 * intercepted #DB deliberately avoids single-step #DB and MTF updates
1751 * as ICEBP is higher priority than both. As instruction emulation is
1752 * completed at this point (i.e. KVM is at the instruction boundary),
1753 * any #DB exception pending delivery must be a debug-trap of lower
1754 * priority than MTF. Record the pending MTF state to be delivered in
1755 * vmx_check_nested_events().
1757 if (nested_cpu_has_mtf(vmcs12
) &&
1758 (!vcpu
->arch
.exception
.pending
||
1759 vcpu
->arch
.exception
.vector
== DB_VECTOR
) &&
1760 (!vcpu
->arch
.exception_vmexit
.pending
||
1761 vcpu
->arch
.exception_vmexit
.vector
== DB_VECTOR
)) {
1762 vmx
->nested
.mtf_pending
= true;
1763 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
1765 vmx
->nested
.mtf_pending
= false;
1769 static int vmx_skip_emulated_instruction(struct kvm_vcpu
*vcpu
)
1771 vmx_update_emulated_instruction(vcpu
);
1772 return skip_emulated_instruction(vcpu
);
1775 static void vmx_clear_hlt(struct kvm_vcpu
*vcpu
)
1778 * Ensure that we clear the HLT state in the VMCS. We don't need to
1779 * explicitly skip the instruction because if the HLT state is set,
1780 * then the instruction is already executing and RIP has already been
1783 if (kvm_hlt_in_guest(vcpu
->kvm
) &&
1784 vmcs_read32(GUEST_ACTIVITY_STATE
) == GUEST_ACTIVITY_HLT
)
1785 vmcs_write32(GUEST_ACTIVITY_STATE
, GUEST_ACTIVITY_ACTIVE
);
1788 static void vmx_inject_exception(struct kvm_vcpu
*vcpu
)
1790 struct kvm_queued_exception
*ex
= &vcpu
->arch
.exception
;
1791 u32 intr_info
= ex
->vector
| INTR_INFO_VALID_MASK
;
1792 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1794 kvm_deliver_exception_payload(vcpu
, ex
);
1796 if (ex
->has_error_code
) {
1798 * Despite the error code being architecturally defined as 32
1799 * bits, and the VMCS field being 32 bits, Intel CPUs and thus
1800 * VMX don't actually supporting setting bits 31:16. Hardware
1801 * will (should) never provide a bogus error code, but AMD CPUs
1802 * do generate error codes with bits 31:16 set, and so KVM's
1803 * ABI lets userspace shove in arbitrary 32-bit values. Drop
1804 * the upper bits to avoid VM-Fail, losing information that
1805 * doesn't really exist is preferable to killing the VM.
1807 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE
, (u16
)ex
->error_code
);
1808 intr_info
|= INTR_INFO_DELIVER_CODE_MASK
;
1811 if (vmx
->rmode
.vm86_active
) {
1813 if (kvm_exception_is_soft(ex
->vector
))
1814 inc_eip
= vcpu
->arch
.event_exit_inst_len
;
1815 kvm_inject_realmode_interrupt(vcpu
, ex
->vector
, inc_eip
);
1819 WARN_ON_ONCE(vmx
->emulation_required
);
1821 if (kvm_exception_is_soft(ex
->vector
)) {
1822 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN
,
1823 vmx
->vcpu
.arch
.event_exit_inst_len
);
1824 intr_info
|= INTR_TYPE_SOFT_EXCEPTION
;
1826 intr_info
|= INTR_TYPE_HARD_EXCEPTION
;
1828 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD
, intr_info
);
1830 vmx_clear_hlt(vcpu
);
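
/* Flag whether a user-return MSR should actually be loaded into hardware. */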
1833 static void vmx_setup_uret_msr(struct vcpu_vmx
*vmx
, unsigned int msr
,
1834 bool load_into_hardware
)
1836 struct vmx_uret_msr
*uret_msr
;
1838 uret_msr
= vmx_find_uret_msr(vmx
, msr
);
1842 uret_msr
->load_into_hardware
= load_into_hardware
;
1846 * Configuring user return MSRs to automatically save, load, and restore MSRs
1847 * that need to be shoved into hardware when running the guest. Note, omitting
1848 * an MSR here does _NOT_ mean it's not emulated, only that it will not be
1849 * loaded into hardware when running the guest.
1851 static void vmx_setup_uret_msrs(struct vcpu_vmx
*vmx
)
1853 #ifdef CONFIG_X86_64
1854 bool load_syscall_msrs
;
1857 * The SYSCALL MSRs are only needed on long mode guests, and only
1858 * when EFER.SCE is set.
1860 load_syscall_msrs
= is_long_mode(&vmx
->vcpu
) &&
1861 (vmx
->vcpu
.arch
.efer
& EFER_SCE
);
1863 vmx_setup_uret_msr(vmx
, MSR_STAR
, load_syscall_msrs
);
1864 vmx_setup_uret_msr(vmx
, MSR_LSTAR
, load_syscall_msrs
);
1865 vmx_setup_uret_msr(vmx
, MSR_SYSCALL_MASK
, load_syscall_msrs
);
1867 vmx_setup_uret_msr(vmx
, MSR_EFER
, update_transition_efer(vmx
));
1869 vmx_setup_uret_msr(vmx
, MSR_TSC_AUX
,
1870 guest_cpuid_has(&vmx
->vcpu
, X86_FEATURE_RDTSCP
) ||
1871 guest_cpuid_has(&vmx
->vcpu
, X86_FEATURE_RDPID
));
1874 * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
1875 * kernel and old userspace. If those guests run on a tsx=off host, do
1876 * allow guests to use TSX_CTRL, but don't change the value in hardware
1877 * so that TSX remains always disabled.
1879 vmx_setup_uret_msr(vmx
, MSR_IA32_TSX_CTRL
, boot_cpu_has(X86_FEATURE_RTM
));
1882 * The set of MSRs to load may have changed, reload MSRs before the
1885 vmx
->guest_uret_msrs_loaded
= false;
1888 u64
vmx_get_l2_tsc_offset(struct kvm_vcpu
*vcpu
)
1890 struct vmcs12
*vmcs12
= get_vmcs12(vcpu
);
1892 if (nested_cpu_has(vmcs12
, CPU_BASED_USE_TSC_OFFSETTING
))
1893 return vmcs12
->tsc_offset
;
1898 u64
vmx_get_l2_tsc_multiplier(struct kvm_vcpu
*vcpu
)
1900 struct vmcs12
*vmcs12
= get_vmcs12(vcpu
);
1902 if (nested_cpu_has(vmcs12
, CPU_BASED_USE_TSC_OFFSETTING
) &&
1903 nested_cpu_has2(vmcs12
, SECONDARY_EXEC_TSC_SCALING
))
1904 return vmcs12
->tsc_multiplier
;
1906 return kvm_caps
.default_tsc_scaling_ratio
;
1909 static void vmx_write_tsc_offset(struct kvm_vcpu
*vcpu
)
1911 vmcs_write64(TSC_OFFSET
, vcpu
->arch
.tsc_offset
);
1914 static void vmx_write_tsc_multiplier(struct kvm_vcpu
*vcpu
)
1916 vmcs_write64(TSC_MULTIPLIER
, vcpu
->arch
.tsc_scaling_ratio
);
1920 * Userspace is allowed to set any supported IA32_FEATURE_CONTROL regardless of
1921 * guest CPUID. Note, KVM allows userspace to set "VMX in SMX" to maintain
1922 * backwards compatibility even though KVM doesn't support emulating SMX. And
1923 * because userspace set "VMX in SMX", the guest must also be allowed to set it,
1924 * e.g. if the MSR is left unlocked and the guest does a RMW operation.
1926 #define KVM_SUPPORTED_FEATURE_CONTROL (FEAT_CTL_LOCKED | \
1927 FEAT_CTL_VMX_ENABLED_INSIDE_SMX | \
1928 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX | \
1929 FEAT_CTL_SGX_LC_ENABLED | \
1930 FEAT_CTL_SGX_ENABLED | \
1931 FEAT_CTL_LMCE_ENABLED)
1933 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx
*vmx
,
1934 struct msr_data
*msr
)
1936 uint64_t valid_bits
;
1939 * Ensure KVM_SUPPORTED_FEATURE_CONTROL is updated when new bits are
1940 * exposed to the guest.
1942 WARN_ON_ONCE(vmx
->msr_ia32_feature_control_valid_bits
&
1943 ~KVM_SUPPORTED_FEATURE_CONTROL
);
1945 if (!msr
->host_initiated
&&
1946 (vmx
->msr_ia32_feature_control
& FEAT_CTL_LOCKED
))
1949 if (msr
->host_initiated
)
1950 valid_bits
= KVM_SUPPORTED_FEATURE_CONTROL
;
1952 valid_bits
= vmx
->msr_ia32_feature_control_valid_bits
;
1954 return !(msr
->data
& ~valid_bits
);
1957 static int vmx_get_msr_feature(struct kvm_msr_entry
*msr
)
1959 switch (msr
->index
) {
1960 case KVM_FIRST_EMULATED_VMX_MSR
... KVM_LAST_EMULATED_VMX_MSR
:
1963 return vmx_get_vmx_msr(&vmcs_config
.nested
, msr
->index
, &msr
->data
);
1965 return KVM_MSR_RET_INVALID
;
1970 * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
1971 * Returns 0 on success, non-0 otherwise.
1972 * Assumes vcpu_load() was already called.
1974 static int vmx_get_msr(struct kvm_vcpu
*vcpu
, struct msr_data
*msr_info
)
1976 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
1977 struct vmx_uret_msr
*msr
;
1980 switch (msr_info
->index
) {
1981 #ifdef CONFIG_X86_64
1983 msr_info
->data
= vmcs_readl(GUEST_FS_BASE
);
1986 msr_info
->data
= vmcs_readl(GUEST_GS_BASE
);
1988 case MSR_KERNEL_GS_BASE
:
1989 msr_info
->data
= vmx_read_guest_kernel_gs_base(vmx
);
1993 return kvm_get_msr_common(vcpu
, msr_info
);
1994 case MSR_IA32_TSX_CTRL
:
1995 if (!msr_info
->host_initiated
&&
1996 !(vcpu
->arch
.arch_capabilities
& ARCH_CAP_TSX_CTRL_MSR
))
1999 case MSR_IA32_UMWAIT_CONTROL
:
2000 if (!msr_info
->host_initiated
&& !vmx_has_waitpkg(vmx
))
2003 msr_info
->data
= vmx
->msr_ia32_umwait_control
;
2005 case MSR_IA32_SPEC_CTRL
:
2006 if (!msr_info
->host_initiated
&&
2007 !guest_has_spec_ctrl_msr(vcpu
))
2010 msr_info
->data
= to_vmx(vcpu
)->spec_ctrl
;
2012 case MSR_IA32_SYSENTER_CS
:
2013 msr_info
->data
= vmcs_read32(GUEST_SYSENTER_CS
);
2015 case MSR_IA32_SYSENTER_EIP
:
2016 msr_info
->data
= vmcs_readl(GUEST_SYSENTER_EIP
);
2018 case MSR_IA32_SYSENTER_ESP
:
2019 msr_info
->data
= vmcs_readl(GUEST_SYSENTER_ESP
);
2021 case MSR_IA32_BNDCFGS
:
2022 if (!kvm_mpx_supported() ||
2023 (!msr_info
->host_initiated
&&
2024 !guest_cpuid_has(vcpu
, X86_FEATURE_MPX
)))
2026 msr_info
->data
= vmcs_read64(GUEST_BNDCFGS
);
2028 case MSR_IA32_MCG_EXT_CTL
:
2029 if (!msr_info
->host_initiated
&&
2030 !(vmx
->msr_ia32_feature_control
&
2031 FEAT_CTL_LMCE_ENABLED
))
2033 msr_info
->data
= vcpu
->arch
.mcg_ext_ctl
;
2035 case MSR_IA32_FEAT_CTL
:
2036 msr_info
->data
= vmx
->msr_ia32_feature_control
;
2038 case MSR_IA32_SGXLEPUBKEYHASH0
... MSR_IA32_SGXLEPUBKEYHASH3
:
2039 if (!msr_info
->host_initiated
&&
2040 !guest_cpuid_has(vcpu
, X86_FEATURE_SGX_LC
))
2042 msr_info
->data
= to_vmx(vcpu
)->msr_ia32_sgxlepubkeyhash
2043 [msr_info
->index
- MSR_IA32_SGXLEPUBKEYHASH0
];
2045 case KVM_FIRST_EMULATED_VMX_MSR
... KVM_LAST_EMULATED_VMX_MSR
:
2046 if (!guest_can_use(vcpu
, X86_FEATURE_VMX
))
2048 if (vmx_get_vmx_msr(&vmx
->nested
.msrs
, msr_info
->index
,
2051 #ifdef CONFIG_KVM_HYPERV
2053 * Enlightened VMCS v1 doesn't have certain VMCS fields but
2054 * instead of just ignoring the features, different Hyper-V
2055 * versions are either trying to use them and fail or do some
2056 * sanity checking and refuse to boot. Filter all unsupported
2059 if (!msr_info
->host_initiated
&& guest_cpuid_has_evmcs(vcpu
))
2060 nested_evmcs_filter_control_msr(vcpu
, msr_info
->index
,
2064 case MSR_IA32_RTIT_CTL
:
2065 if (!vmx_pt_mode_is_host_guest())
2067 msr_info
->data
= vmx
->pt_desc
.guest
.ctl
;
2069 case MSR_IA32_RTIT_STATUS
:
2070 if (!vmx_pt_mode_is_host_guest())
2072 msr_info
->data
= vmx
->pt_desc
.guest
.status
;
2074 case MSR_IA32_RTIT_CR3_MATCH
:
2075 if (!vmx_pt_mode_is_host_guest() ||
2076 !intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2077 PT_CAP_cr3_filtering
))
2079 msr_info
->data
= vmx
->pt_desc
.guest
.cr3_match
;
2081 case MSR_IA32_RTIT_OUTPUT_BASE
:
2082 if (!vmx_pt_mode_is_host_guest() ||
2083 (!intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2084 PT_CAP_topa_output
) &&
2085 !intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2086 PT_CAP_single_range_output
)))
2088 msr_info
->data
= vmx
->pt_desc
.guest
.output_base
;
2090 case MSR_IA32_RTIT_OUTPUT_MASK
:
2091 if (!vmx_pt_mode_is_host_guest() ||
2092 (!intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2093 PT_CAP_topa_output
) &&
2094 !intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2095 PT_CAP_single_range_output
)))
2097 msr_info
->data
= vmx
->pt_desc
.guest
.output_mask
;
2099 case MSR_IA32_RTIT_ADDR0_A
... MSR_IA32_RTIT_ADDR3_B
:
2100 index
= msr_info
->index
- MSR_IA32_RTIT_ADDR0_A
;
2101 if (!vmx_pt_mode_is_host_guest() ||
2102 (index
>= 2 * vmx
->pt_desc
.num_address_ranges
))
2105 msr_info
->data
= vmx
->pt_desc
.guest
.addr_b
[index
/ 2];
2107 msr_info
->data
= vmx
->pt_desc
.guest
.addr_a
[index
/ 2];
2109 case MSR_IA32_DEBUGCTLMSR
:
2110 msr_info
->data
= vmcs_read64(GUEST_IA32_DEBUGCTL
);
2114 msr
= vmx_find_uret_msr(vmx
, msr_info
->index
);
2116 msr_info
->data
= msr
->data
;
2119 return kvm_get_msr_common(vcpu
, msr_info
);
static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
					     u64 data)
{
#ifdef CONFIG_X86_64
	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return (u32)data;
#endif
	return (unsigned long)data;
}

static u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
{
	u64 debugctl = 0;

	if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    (host_initiated || guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
		debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;

	if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
	    (host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
		debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;

	return debugctl;
}
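/*
 * Illustrative sketch (not part of the driver): the MSR write path uses the
 * mask returned above to strip DEBUGCTL bits the vCPU may not set.  The
 * helper and "guest_val" below are hypothetical and only show the filtering.
 */
static inline u64 __example_clamp_debugctl(struct kvm_vcpu *vcpu, u64 guest_val)
{
	u64 supported = vmx_get_supported_debugctl(vcpu, false);

	/*
	 * Bits outside "supported" (e.g. DEBUGCTLMSR_LBR on a vCPU without
	 * LBR support) are dropped rather than written to the VMCS.
	 */
	return guest_val & supported;
}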
2151 * Writes msr value into the appropriate "register".
2152 * Returns 0 on success, non-0 otherwise.
2153 * Assumes vcpu_load() was already called.
2155 static int vmx_set_msr(struct kvm_vcpu
*vcpu
, struct msr_data
*msr_info
)
2157 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
2158 struct vmx_uret_msr
*msr
;
2160 u32 msr_index
= msr_info
->index
;
2161 u64 data
= msr_info
->data
;
2164 switch (msr_index
) {
2166 ret
= kvm_set_msr_common(vcpu
, msr_info
);
2168 #ifdef CONFIG_X86_64
2170 vmx_segment_cache_clear(vmx
);
2171 vmcs_writel(GUEST_FS_BASE
, data
);
2174 vmx_segment_cache_clear(vmx
);
2175 vmcs_writel(GUEST_GS_BASE
, data
);
2177 case MSR_KERNEL_GS_BASE
:
2178 vmx_write_guest_kernel_gs_base(vmx
, data
);
2181 ret
= kvm_set_msr_common(vcpu
, msr_info
);
2183 * Always intercepting WRMSR could incur non-negligible
2184 * overhead given xfd might be changed frequently in
2185 * guest context switch. Disable write interception
2186 * upon the first write with a non-zero value (indicating
2187 * potential usage on dynamic xfeatures). Also update
2188 * exception bitmap to trap #NM for proper virtualization
2192 vmx_disable_intercept_for_msr(vcpu
, MSR_IA32_XFD
,
2194 vcpu
->arch
.xfd_no_write_intercept
= true;
2195 vmx_update_exception_bitmap(vcpu
);
2199 case MSR_IA32_SYSENTER_CS
:
2200 if (is_guest_mode(vcpu
))
2201 get_vmcs12(vcpu
)->guest_sysenter_cs
= data
;
2202 vmcs_write32(GUEST_SYSENTER_CS
, data
);
2204 case MSR_IA32_SYSENTER_EIP
:
2205 if (is_guest_mode(vcpu
)) {
2206 data
= nested_vmx_truncate_sysenter_addr(vcpu
, data
);
2207 get_vmcs12(vcpu
)->guest_sysenter_eip
= data
;
2209 vmcs_writel(GUEST_SYSENTER_EIP
, data
);
2211 case MSR_IA32_SYSENTER_ESP
:
2212 if (is_guest_mode(vcpu
)) {
2213 data
= nested_vmx_truncate_sysenter_addr(vcpu
, data
);
2214 get_vmcs12(vcpu
)->guest_sysenter_esp
= data
;
2216 vmcs_writel(GUEST_SYSENTER_ESP
, data
);
2218 case MSR_IA32_DEBUGCTLMSR
: {
2221 invalid
= data
& ~vmx_get_supported_debugctl(vcpu
, msr_info
->host_initiated
);
2222 if (invalid
& (DEBUGCTLMSR_BTF
|DEBUGCTLMSR_LBR
)) {
2223 kvm_pr_unimpl_wrmsr(vcpu
, msr_index
, data
);
2224 data
&= ~(DEBUGCTLMSR_BTF
|DEBUGCTLMSR_LBR
);
2225 invalid
&= ~(DEBUGCTLMSR_BTF
|DEBUGCTLMSR_LBR
);
2231 if (is_guest_mode(vcpu
) && get_vmcs12(vcpu
)->vm_exit_controls
&
2232 VM_EXIT_SAVE_DEBUG_CONTROLS
)
2233 get_vmcs12(vcpu
)->guest_ia32_debugctl
= data
;
2235 vmcs_write64(GUEST_IA32_DEBUGCTL
, data
);
2236 if (intel_pmu_lbr_is_enabled(vcpu
) && !to_vmx(vcpu
)->lbr_desc
.event
&&
2237 (data
& DEBUGCTLMSR_LBR
))
2238 intel_pmu_create_guest_lbr_event(vcpu
);
2241 case MSR_IA32_BNDCFGS
:
2242 if (!kvm_mpx_supported() ||
2243 (!msr_info
->host_initiated
&&
2244 !guest_cpuid_has(vcpu
, X86_FEATURE_MPX
)))
2246 if (is_noncanonical_address(data
& PAGE_MASK
, vcpu
) ||
2247 (data
& MSR_IA32_BNDCFGS_RSVD
))
2250 if (is_guest_mode(vcpu
) &&
2251 ((vmx
->nested
.msrs
.entry_ctls_high
& VM_ENTRY_LOAD_BNDCFGS
) ||
2252 (vmx
->nested
.msrs
.exit_ctls_high
& VM_EXIT_CLEAR_BNDCFGS
)))
2253 get_vmcs12(vcpu
)->guest_bndcfgs
= data
;
2255 vmcs_write64(GUEST_BNDCFGS
, data
);
2257 case MSR_IA32_UMWAIT_CONTROL
:
2258 if (!msr_info
->host_initiated
&& !vmx_has_waitpkg(vmx
))
2261 /* The reserved bit 1 and non-32 bit [63:32] should be zero */
2262 if (data
& (BIT_ULL(1) | GENMASK_ULL(63, 32)))
2265 vmx
->msr_ia32_umwait_control
= data
;
2267 case MSR_IA32_SPEC_CTRL
:
2268 if (!msr_info
->host_initiated
&&
2269 !guest_has_spec_ctrl_msr(vcpu
))
2272 if (kvm_spec_ctrl_test_value(data
))
2275 vmx
->spec_ctrl
= data
;
2281 * When it's written (to non-zero) for the first time, pass
2285 * The handling of the MSR bitmap for L2 guests is done in
2286 * nested_vmx_prepare_msr_bitmap. We should not touch the
2287 * vmcs02.msr_bitmap here since it gets completely overwritten
2288 * in the merging. We update the vmcs01 here for L1 as well
2289 * since it will end up touching the MSR anyway now.
2291 vmx_disable_intercept_for_msr(vcpu
,
2295 case MSR_IA32_TSX_CTRL
:
2296 if (!msr_info
->host_initiated
&&
2297 !(vcpu
->arch
.arch_capabilities
& ARCH_CAP_TSX_CTRL_MSR
))
2299 if (data
& ~(TSX_CTRL_RTM_DISABLE
| TSX_CTRL_CPUID_CLEAR
))
2302 case MSR_IA32_CR_PAT
:
2303 ret
= kvm_set_msr_common(vcpu
, msr_info
);
2307 if (is_guest_mode(vcpu
) &&
2308 get_vmcs12(vcpu
)->vm_exit_controls
& VM_EXIT_SAVE_IA32_PAT
)
2309 get_vmcs12(vcpu
)->guest_ia32_pat
= data
;
2311 if (vmcs_config
.vmentry_ctrl
& VM_ENTRY_LOAD_IA32_PAT
)
2312 vmcs_write64(GUEST_IA32_PAT
, data
);
2314 case MSR_IA32_MCG_EXT_CTL
:
2315 if ((!msr_info
->host_initiated
&&
2316 !(to_vmx(vcpu
)->msr_ia32_feature_control
&
2317 FEAT_CTL_LMCE_ENABLED
)) ||
2318 (data
& ~MCG_EXT_CTL_LMCE_EN
))
2320 vcpu
->arch
.mcg_ext_ctl
= data
;
2322 case MSR_IA32_FEAT_CTL
:
2323 if (!is_vmx_feature_control_msr_valid(vmx
, msr_info
))
2326 vmx
->msr_ia32_feature_control
= data
;
2327 if (msr_info
->host_initiated
&& data
== 0)
2328 vmx_leave_nested(vcpu
);
2330 /* SGX may be enabled/disabled by guest's firmware */
2331 vmx_write_encls_bitmap(vcpu
, NULL
);
2333 case MSR_IA32_SGXLEPUBKEYHASH0
... MSR_IA32_SGXLEPUBKEYHASH3
:
2335 * On real hardware, the LE hash MSRs are writable before
2336 * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
2337 * at which point SGX related bits in IA32_FEATURE_CONTROL
2340 * KVM does not emulate SGX activation for simplicity, so
2341 * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
2342 * is unlocked. This is technically not architectural
2343 * behavior, but it's close enough.
2345 if (!msr_info
->host_initiated
&&
2346 (!guest_cpuid_has(vcpu
, X86_FEATURE_SGX_LC
) ||
2347 ((vmx
->msr_ia32_feature_control
& FEAT_CTL_LOCKED
) &&
2348 !(vmx
->msr_ia32_feature_control
& FEAT_CTL_SGX_LC_ENABLED
))))
2350 vmx
->msr_ia32_sgxlepubkeyhash
2351 [msr_index
- MSR_IA32_SGXLEPUBKEYHASH0
] = data
;
2353 case KVM_FIRST_EMULATED_VMX_MSR
... KVM_LAST_EMULATED_VMX_MSR
:
2354 if (!msr_info
->host_initiated
)
2355 return 1; /* they are read-only */
2356 if (!guest_can_use(vcpu
, X86_FEATURE_VMX
))
2358 return vmx_set_vmx_msr(vcpu
, msr_index
, data
);
2359 case MSR_IA32_RTIT_CTL
:
2360 if (!vmx_pt_mode_is_host_guest() ||
2361 vmx_rtit_ctl_check(vcpu
, data
) ||
2364 vmcs_write64(GUEST_IA32_RTIT_CTL
, data
);
2365 vmx
->pt_desc
.guest
.ctl
= data
;
2366 pt_update_intercept_for_msr(vcpu
);
2368 case MSR_IA32_RTIT_STATUS
:
2369 if (!pt_can_write_msr(vmx
))
2371 if (data
& MSR_IA32_RTIT_STATUS_MASK
)
2373 vmx
->pt_desc
.guest
.status
= data
;
2375 case MSR_IA32_RTIT_CR3_MATCH
:
2376 if (!pt_can_write_msr(vmx
))
2378 if (!intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2379 PT_CAP_cr3_filtering
))
2381 vmx
->pt_desc
.guest
.cr3_match
= data
;
2383 case MSR_IA32_RTIT_OUTPUT_BASE
:
2384 if (!pt_can_write_msr(vmx
))
2386 if (!intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2387 PT_CAP_topa_output
) &&
2388 !intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2389 PT_CAP_single_range_output
))
2391 if (!pt_output_base_valid(vcpu
, data
))
2393 vmx
->pt_desc
.guest
.output_base
= data
;
2395 case MSR_IA32_RTIT_OUTPUT_MASK
:
2396 if (!pt_can_write_msr(vmx
))
2398 if (!intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2399 PT_CAP_topa_output
) &&
2400 !intel_pt_validate_cap(vmx
->pt_desc
.caps
,
2401 PT_CAP_single_range_output
))
2403 vmx
->pt_desc
.guest
.output_mask
= data
;
2405 case MSR_IA32_RTIT_ADDR0_A
... MSR_IA32_RTIT_ADDR3_B
:
2406 if (!pt_can_write_msr(vmx
))
2408 index
= msr_info
->index
- MSR_IA32_RTIT_ADDR0_A
;
2409 if (index
>= 2 * vmx
->pt_desc
.num_address_ranges
)
2411 if (is_noncanonical_address(data
, vcpu
))
2414 vmx
->pt_desc
.guest
.addr_b
[index
/ 2] = data
;
2416 vmx
->pt_desc
.guest
.addr_a
[index
/ 2] = data
;
2418 case MSR_IA32_PERF_CAPABILITIES
:
2419 if (data
&& !vcpu_to_pmu(vcpu
)->version
)
2421 if (data
& PMU_CAP_LBR_FMT
) {
2422 if ((data
& PMU_CAP_LBR_FMT
) !=
2423 (kvm_caps
.supported_perf_cap
& PMU_CAP_LBR_FMT
))
2425 if (!cpuid_model_is_consistent(vcpu
))
2428 if (data
& PERF_CAP_PEBS_FORMAT
) {
2429 if ((data
& PERF_CAP_PEBS_MASK
) !=
2430 (kvm_caps
.supported_perf_cap
& PERF_CAP_PEBS_MASK
))
2432 if (!guest_cpuid_has(vcpu
, X86_FEATURE_DS
))
2434 if (!guest_cpuid_has(vcpu
, X86_FEATURE_DTES64
))
2436 if (!cpuid_model_is_consistent(vcpu
))
2439 ret
= kvm_set_msr_common(vcpu
, msr_info
);
2444 msr
= vmx_find_uret_msr(vmx
, msr_index
);
2446 ret
= vmx_set_guest_uret_msr(vmx
, msr
, data
);
2448 ret
= kvm_set_msr_common(vcpu
, msr_info
);
2451 /* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
2452 if (msr_index
== MSR_IA32_ARCH_CAPABILITIES
)
2453 vmx_update_fb_clear_dis(vcpu
, vmx
);
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	unsigned long guest_owned_bits;

	kvm_register_mark_available(vcpu, reg);

	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	case VCPU_EXREG_CR0:
		guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;

		vcpu->arch.cr0 &= ~guest_owned_bits;
		vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
		break;
	case VCPU_EXREG_CR3:
		/*
		 * When intercepting CR3 loads, e.g. for shadow paging, KVM's
		 * CR3 is loaded into hardware, not the guest's CR3.
		 */
		if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING))
			vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
		break;
	case VCPU_EXREG_CR4:
		guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;

		vcpu->arch.cr4 &= ~guest_owned_bits;
		vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
		break;
	default:
		KVM_BUG_ON(1, vcpu->kvm);
		break;
	}
}
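/*
 * Illustrative sketch (not part of the driver): for CR0/CR4 the refresh above
 * keeps KVM-owned bits from the cached value and takes guest-owned bits from
 * the VMCS.  The helper below only demonstrates that merge; the names are
 * hypothetical.
 */
static inline unsigned long __example_merge_cr(unsigned long cached,
					       unsigned long hw_val,
					       unsigned long guest_owned_bits)
{
	cached &= ~guest_owned_bits;		/* keep KVM-owned bits */
	cached |= hw_val & guest_owned_bits;	/* refresh guest-owned bits */
	return cached;
}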
/*
 * There is no X86_FEATURE for SGX yet, but anyway we need to query CPUID
 * directly instead of going through cpu_has(), to ensure KVM is trapping
 * ENCLS whenever it's supported in hardware.  It does not matter whether
 * the host OS supports or has enabled SGX.
 */
static bool cpu_has_sgx(void)
{
	return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
}

/*
 * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
 * can't be used due to errata where VM Exit may incorrectly clear
 * IA32_PERF_GLOBAL_CTRL[34:32].  Work around the errata by using the
 * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
 */
static bool cpu_has_perf_global_ctrl_bug(void)
{
	if (boot_cpu_data.x86 == 0x6) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_NEHALEM_EP:	/* AAK155 */
		case INTEL_FAM6_NEHALEM:	/* AAP115 */
		case INTEL_FAM6_WESTMERE:	/* AAT100 */
		case INTEL_FAM6_WESTMERE_EP:	/* BC86,AAY89,BD102 */
		case INTEL_FAM6_NEHALEM_EX:	/* BA97 */
			return true;
		default:
			break;
		}
	}

	return false;
}
static int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}

static u64 adjust_vmx_controls64(u64 ctl_opt, u32 msr)
{
	u64 allowed;

	rdmsrl(msr, allowed);

	return ctl_opt & allowed;
}
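/*
 * Illustrative worked example (not part of the driver): the capability MSR
 * read above yields an allowed-1 mask in the high word and a must-be-1 mask in
 * the low word.  With hypothetical values:
 *
 *	ctl_min = 0x0004, ctl_opt = 0x0100
 *	vmx_msr_high = 0x0104 (bits allowed to be 1), vmx_msr_low = 0x0000
 *	ctl = (0x0104 & 0x0104) | 0x0000 = 0x0104
 *	ctl_min & ~ctl == 0  ->  success, *result = 0x0104
 *
 * Had vmx_msr_high been 0x0100, the required bit 0x0004 would be cleared,
 * ctl_min & ~ctl would be non-zero, and the function would return -EIO.
 */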
2563 static int setup_vmcs_config(struct vmcs_config
*vmcs_conf
,
2564 struct vmx_capability
*vmx_cap
)
2566 u32 vmx_msr_low
, vmx_msr_high
;
2567 u32 _pin_based_exec_control
= 0;
2568 u32 _cpu_based_exec_control
= 0;
2569 u32 _cpu_based_2nd_exec_control
= 0;
2570 u64 _cpu_based_3rd_exec_control
= 0;
2571 u32 _vmexit_control
= 0;
2572 u32 _vmentry_control
= 0;
2577 * LOAD/SAVE_DEBUG_CONTROLS are absent because both are mandatory.
2578 * SAVE_IA32_PAT and SAVE_IA32_EFER are absent because KVM always
2579 * intercepts writes to PAT and EFER, i.e. never enables those controls.
2584 } const vmcs_entry_exit_pairs
[] = {
2585 { VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
, VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL
},
2586 { VM_ENTRY_LOAD_IA32_PAT
, VM_EXIT_LOAD_IA32_PAT
},
2587 { VM_ENTRY_LOAD_IA32_EFER
, VM_EXIT_LOAD_IA32_EFER
},
2588 { VM_ENTRY_LOAD_BNDCFGS
, VM_EXIT_CLEAR_BNDCFGS
},
2589 { VM_ENTRY_LOAD_IA32_RTIT_CTL
, VM_EXIT_CLEAR_IA32_RTIT_CTL
},
2592 memset(vmcs_conf
, 0, sizeof(*vmcs_conf
));
2594 if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
,
2595 KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL
,
2596 MSR_IA32_VMX_PROCBASED_CTLS
,
2597 &_cpu_based_exec_control
))
2599 if (_cpu_based_exec_control
& CPU_BASED_ACTIVATE_SECONDARY_CONTROLS
) {
2600 if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL
,
2601 KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL
,
2602 MSR_IA32_VMX_PROCBASED_CTLS2
,
2603 &_cpu_based_2nd_exec_control
))
2606 #ifndef CONFIG_X86_64
2607 if (!(_cpu_based_2nd_exec_control
&
2608 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
))
2609 _cpu_based_exec_control
&= ~CPU_BASED_TPR_SHADOW
;
2612 if (!(_cpu_based_exec_control
& CPU_BASED_TPR_SHADOW
))
2613 _cpu_based_2nd_exec_control
&= ~(
2614 SECONDARY_EXEC_APIC_REGISTER_VIRT
|
2615 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE
|
2616 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY
);
2618 rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP
,
2619 &vmx_cap
->ept
, &vmx_cap
->vpid
);
2621 if (!(_cpu_based_2nd_exec_control
& SECONDARY_EXEC_ENABLE_EPT
) &&
2623 pr_warn_once("EPT CAP should not exist if not support "
2624 "1-setting enable EPT VM-execution control\n");
2626 if (error_on_inconsistent_vmcs_config
)
2631 if (!(_cpu_based_2nd_exec_control
& SECONDARY_EXEC_ENABLE_VPID
) &&
2633 pr_warn_once("VPID CAP should not exist if not support "
2634 "1-setting enable VPID VM-execution control\n");
2636 if (error_on_inconsistent_vmcs_config
)
2643 _cpu_based_2nd_exec_control
&= ~SECONDARY_EXEC_ENCLS_EXITING
;
2645 if (_cpu_based_exec_control
& CPU_BASED_ACTIVATE_TERTIARY_CONTROLS
)
2646 _cpu_based_3rd_exec_control
=
2647 adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL
,
2648 MSR_IA32_VMX_PROCBASED_CTLS3
);
2650 if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
,
2651 KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS
,
2652 MSR_IA32_VMX_EXIT_CTLS
,
2656 if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL
,
2657 KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL
,
2658 MSR_IA32_VMX_PINBASED_CTLS
,
2659 &_pin_based_exec_control
))
2662 if (cpu_has_broken_vmx_preemption_timer())
2663 _pin_based_exec_control
&= ~PIN_BASED_VMX_PREEMPTION_TIMER
;
2664 if (!(_cpu_based_2nd_exec_control
&
2665 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY
))
2666 _pin_based_exec_control
&= ~PIN_BASED_POSTED_INTR
;
2668 if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
,
2669 KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS
,
2670 MSR_IA32_VMX_ENTRY_CTLS
,
2674 for (i
= 0; i
< ARRAY_SIZE(vmcs_entry_exit_pairs
); i
++) {
2675 u32 n_ctrl
= vmcs_entry_exit_pairs
[i
].entry_control
;
2676 u32 x_ctrl
= vmcs_entry_exit_pairs
[i
].exit_control
;
2678 if (!(_vmentry_control
& n_ctrl
) == !(_vmexit_control
& x_ctrl
))
2681 pr_warn_once("Inconsistent VM-Entry/VM-Exit pair, entry = %x, exit = %x\n",
2682 _vmentry_control
& n_ctrl
, _vmexit_control
& x_ctrl
);
2684 if (error_on_inconsistent_vmcs_config
)
2687 _vmentry_control
&= ~n_ctrl
;
2688 _vmexit_control
&= ~x_ctrl
;
2691 rdmsr(MSR_IA32_VMX_BASIC
, vmx_msr_low
, vmx_msr_high
);
2693 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2694 if ((vmx_msr_high
& 0x1fff) > PAGE_SIZE
)
2697 #ifdef CONFIG_X86_64
2698 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2699 if (vmx_msr_high
& (1u<<16))
2703 /* Require Write-Back (WB) memory type for VMCS accesses. */
2704 if (((vmx_msr_high
>> 18) & 15) != 6)
2707 rdmsrl(MSR_IA32_VMX_MISC
, misc_msr
);
2709 vmcs_conf
->size
= vmx_msr_high
& 0x1fff;
2710 vmcs_conf
->basic_cap
= vmx_msr_high
& ~0x1fff;
2712 vmcs_conf
->revision_id
= vmx_msr_low
;
2714 vmcs_conf
->pin_based_exec_ctrl
= _pin_based_exec_control
;
2715 vmcs_conf
->cpu_based_exec_ctrl
= _cpu_based_exec_control
;
2716 vmcs_conf
->cpu_based_2nd_exec_ctrl
= _cpu_based_2nd_exec_control
;
2717 vmcs_conf
->cpu_based_3rd_exec_ctrl
= _cpu_based_3rd_exec_control
;
2718 vmcs_conf
->vmexit_ctrl
= _vmexit_control
;
2719 vmcs_conf
->vmentry_ctrl
= _vmentry_control
;
2720 vmcs_conf
->misc
= misc_msr
;
2722 #if IS_ENABLED(CONFIG_HYPERV)
2723 if (enlightened_vmcs
)
2724 evmcs_sanitize_exec_ctrls(vmcs_conf
);
static bool __kvm_is_vmx_supported(void)
{
	int cpu = smp_processor_id();

	if (!(cpuid_ecx(1) & feature_bit(VMX))) {
		pr_err("VMX not supported by CPU %d\n", cpu);
		return false;
	}

	if (!this_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !this_cpu_has(X86_FEATURE_VMX)) {
		pr_err("VMX not enabled (by BIOS) in MSR_IA32_FEAT_CTL on CPU %d\n", cpu);
		return false;
	}

	return true;
}

static bool kvm_is_vmx_supported(void)
{
	bool supported;

	migrate_disable();
	supported = __kvm_is_vmx_supported();
	migrate_enable();

	return supported;
}

static int vmx_check_processor_compat(void)
{
	int cpu = raw_smp_processor_id();
	struct vmcs_config vmcs_conf;
	struct vmx_capability vmx_cap;

	if (!__kvm_is_vmx_supported())
		return -EIO;

	if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
		pr_err("Failed to setup VMCS config on CPU %d\n", cpu);
		return -EIO;
	}
	if (nested)
		nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config))) {
		pr_err("Inconsistent VMCS config on CPU %d\n", cpu);
		return -EIO;
	}
	return 0;
}
static int kvm_cpu_vmxon(u64 vmxon_pointer)
{
	u64 msr;

	cr4_set_bits(X86_CR4_VMXE);

	asm goto("1: vmxon %[vmxon_pointer]\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  : : [vmxon_pointer] "m"(vmxon_pointer)
			  : : fault);
	return 0;

fault:
	WARN_ONCE(1, "VMXON faulted, MSR_IA32_FEAT_CTL (0x3a) = 0x%llx\n",
		  rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr) ? 0xdeadbeef : msr);
	cr4_clear_bits(X86_CR4_VMXE);

	return -EFAULT;
}

static int vmx_hardware_enable(void)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	int r;

	if (cr4_read_shadow() & X86_CR4_VMXE)
		return -EBUSY;

	/*
	 * This can happen if we hot-added a CPU but failed to allocate
	 * VP assist page for it.
	 */
	if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
		return -EFAULT;

	intel_pt_handle_vmx(1);

	r = kvm_cpu_vmxon(phys_addr);
	if (r) {
		intel_pt_handle_vmx(0);
		return r;
	}

	return 0;
}
static void vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v, *n;

	list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
				 loaded_vmcss_on_cpu_link)
		__loaded_vmcs_clear(v);
}

static void vmx_hardware_disable(void)
{
	vmclear_local_loaded_vmcss();

	if (kvm_cpu_vmxoff())
		kvm_spurious_fault();

	intel_pt_handle_vmx(0);
}
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, flags, 0);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);

	/* KVM supports Enlightened VMCS v1 only */
	if (kvm_is_using_evmcs())
		vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
	else
		vmcs->hdr.revision_id = vmcs_config.revision_id;

	if (shadow)
		vmcs->hdr.shadow_vmcs = 1;
	return vmcs;
}

void free_vmcs(struct vmcs *vmcs)
{
	free_page((unsigned long)vmcs);
}

/*
 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
 */
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	if (!loaded_vmcs->vmcs)
		return;
	loaded_vmcs_clear(loaded_vmcs);
	free_vmcs(loaded_vmcs->vmcs);
	loaded_vmcs->vmcs = NULL;
	if (loaded_vmcs->msr_bitmap)
		free_page((unsigned long)loaded_vmcs->msr_bitmap);
	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
	loaded_vmcs->vmcs = alloc_vmcs(false);
	if (!loaded_vmcs->vmcs)
		return -ENOMEM;

	vmcs_clear(loaded_vmcs->vmcs);

	loaded_vmcs->shadow_vmcs = NULL;
	loaded_vmcs->hv_timer_soft_disabled = false;
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;

	if (cpu_has_vmx_msr_bitmap()) {
		loaded_vmcs->msr_bitmap = (unsigned long *)
				__get_free_page(GFP_KERNEL_ACCOUNT);
		if (!loaded_vmcs->msr_bitmap)
			goto out_vmcs;
		memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
	}

	memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
	memset(&loaded_vmcs->controls_shadow, 0,
	       sizeof(struct vmcs_controls_shadow));

	return 0;

out_vmcs:
	free_loaded_vmcs(loaded_vmcs);
	return -ENOMEM;
}
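/*
 * Illustrative sketch (not part of the driver): the 0xff fill above means
 * "intercept everything" -- each MSR is represented by one read bit and one
 * write bit, and a set bit forces a VM-Exit.  A hypothetical check against a
 * freshly allocated bitmap therefore reports interception for any MSR.
 */
static inline bool __example_msr_read_is_intercepted(unsigned long *msr_bitmap,
						     u32 msr)
{
	/* Low MSRs (0x0 - 0x1fff) occupy the first 1KiB read bitmap. */
	if (msr <= 0x1fff)
		return test_bit(msr, msr_bitmap);
	return true;	/* layout of the other ranges omitted in this sketch */
}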
static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(false, cpu, GFP_KERNEL);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		/*
		 * When eVMCS is enabled, alloc_vmcs_cpu() sets
		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
		 * revision_id reported by MSR_IA32_VMX_BASIC.
		 *
		 * However, even though not explicitly documented by
		 * TLFS, VMXArea passed as VMXON argument should
		 * still be marked with revision_id reported by
		 * physical CPU.
		 */
		if (kvm_is_using_evmcs())
			vmcs->hdr.revision_id = vmcs_config.revision_id;

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}
2969 static void fix_pmode_seg(struct kvm_vcpu
*vcpu
, int seg
,
2970 struct kvm_segment
*save
)
2972 if (!emulate_invalid_guest_state
) {
2974 * CS and SS RPL should be equal during guest entry according
2975 * to VMX spec, but in reality it is not always so. Since vcpu
2976 * is in the middle of the transition from real mode to
2977 * protected mode it is safe to assume that RPL 0 is a good
2980 if (seg
== VCPU_SREG_CS
|| seg
== VCPU_SREG_SS
)
2981 save
->selector
&= ~SEGMENT_RPL_MASK
;
2982 save
->dpl
= save
->selector
& SEGMENT_RPL_MASK
;
2985 __vmx_set_segment(vcpu
, save
, seg
);
2988 static void enter_pmode(struct kvm_vcpu
*vcpu
)
2990 unsigned long flags
;
2991 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
2994 * Update real mode segment cache. It may be not up-to-date if segment
2995 * register was written while vcpu was in a guest mode.
2997 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_ES
], VCPU_SREG_ES
);
2998 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_DS
], VCPU_SREG_DS
);
2999 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_FS
], VCPU_SREG_FS
);
3000 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_GS
], VCPU_SREG_GS
);
3001 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_SS
], VCPU_SREG_SS
);
3002 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_CS
], VCPU_SREG_CS
);
3004 vmx
->rmode
.vm86_active
= 0;
3006 __vmx_set_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_TR
], VCPU_SREG_TR
);
3008 flags
= vmcs_readl(GUEST_RFLAGS
);
3009 flags
&= RMODE_GUEST_OWNED_EFLAGS_BITS
;
3010 flags
|= vmx
->rmode
.save_rflags
& ~RMODE_GUEST_OWNED_EFLAGS_BITS
;
3011 vmcs_writel(GUEST_RFLAGS
, flags
);
3013 vmcs_writel(GUEST_CR4
, (vmcs_readl(GUEST_CR4
) & ~X86_CR4_VME
) |
3014 (vmcs_readl(CR4_READ_SHADOW
) & X86_CR4_VME
));
3016 vmx_update_exception_bitmap(vcpu
);
3018 fix_pmode_seg(vcpu
, VCPU_SREG_CS
, &vmx
->rmode
.segs
[VCPU_SREG_CS
]);
3019 fix_pmode_seg(vcpu
, VCPU_SREG_SS
, &vmx
->rmode
.segs
[VCPU_SREG_SS
]);
3020 fix_pmode_seg(vcpu
, VCPU_SREG_ES
, &vmx
->rmode
.segs
[VCPU_SREG_ES
]);
3021 fix_pmode_seg(vcpu
, VCPU_SREG_DS
, &vmx
->rmode
.segs
[VCPU_SREG_DS
]);
3022 fix_pmode_seg(vcpu
, VCPU_SREG_FS
, &vmx
->rmode
.segs
[VCPU_SREG_FS
]);
3023 fix_pmode_seg(vcpu
, VCPU_SREG_GS
, &vmx
->rmode
.segs
[VCPU_SREG_GS
]);
3026 static void fix_rmode_seg(int seg
, struct kvm_segment
*save
)
3028 const struct kvm_vmx_segment_field
*sf
= &kvm_vmx_segment_fields
[seg
];
3029 struct kvm_segment var
= *save
;
3032 if (seg
== VCPU_SREG_CS
)
3035 if (!emulate_invalid_guest_state
) {
3036 var
.selector
= var
.base
>> 4;
3037 var
.base
= var
.base
& 0xffff0;
3047 if (save
->base
& 0xf)
3048 pr_warn_once("segment base is not paragraph aligned "
3049 "when entering protected mode (seg=%d)", seg
);
3052 vmcs_write16(sf
->selector
, var
.selector
);
3053 vmcs_writel(sf
->base
, var
.base
);
3054 vmcs_write32(sf
->limit
, var
.limit
);
3055 vmcs_write32(sf
->ar_bytes
, vmx_segment_access_rights(&var
));
3058 static void enter_rmode(struct kvm_vcpu
*vcpu
)
3060 unsigned long flags
;
3061 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
3062 struct kvm_vmx
*kvm_vmx
= to_kvm_vmx(vcpu
->kvm
);
3065 * KVM should never use VM86 to virtualize Real Mode when L2 is active,
3066 * as using VM86 is unnecessary if unrestricted guest is enabled, and
3067 * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0
3068 * should VM-Fail and KVM should reject userspace attempts to stuff
3069 * CR0.PG=0 when L2 is active.
3071 WARN_ON_ONCE(is_guest_mode(vcpu
));
3073 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_TR
], VCPU_SREG_TR
);
3074 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_ES
], VCPU_SREG_ES
);
3075 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_DS
], VCPU_SREG_DS
);
3076 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_FS
], VCPU_SREG_FS
);
3077 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_GS
], VCPU_SREG_GS
);
3078 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_SS
], VCPU_SREG_SS
);
3079 vmx_get_segment(vcpu
, &vmx
->rmode
.segs
[VCPU_SREG_CS
], VCPU_SREG_CS
);
3081 vmx
->rmode
.vm86_active
= 1;
3083 vmx_segment_cache_clear(vmx
);
3085 vmcs_writel(GUEST_TR_BASE
, kvm_vmx
->tss_addr
);
3086 vmcs_write32(GUEST_TR_LIMIT
, RMODE_TSS_SIZE
- 1);
3087 vmcs_write32(GUEST_TR_AR_BYTES
, 0x008b);
3089 flags
= vmcs_readl(GUEST_RFLAGS
);
3090 vmx
->rmode
.save_rflags
= flags
;
3092 flags
|= X86_EFLAGS_IOPL
| X86_EFLAGS_VM
;
3094 vmcs_writel(GUEST_RFLAGS
, flags
);
3095 vmcs_writel(GUEST_CR4
, vmcs_readl(GUEST_CR4
) | X86_CR4_VME
);
3096 vmx_update_exception_bitmap(vcpu
);
3098 fix_rmode_seg(VCPU_SREG_SS
, &vmx
->rmode
.segs
[VCPU_SREG_SS
]);
3099 fix_rmode_seg(VCPU_SREG_CS
, &vmx
->rmode
.segs
[VCPU_SREG_CS
]);
3100 fix_rmode_seg(VCPU_SREG_ES
, &vmx
->rmode
.segs
[VCPU_SREG_ES
]);
3101 fix_rmode_seg(VCPU_SREG_DS
, &vmx
->rmode
.segs
[VCPU_SREG_DS
]);
3102 fix_rmode_seg(VCPU_SREG_GS
, &vmx
->rmode
.segs
[VCPU_SREG_GS
]);
3103 fix_rmode_seg(VCPU_SREG_FS
, &vmx
->rmode
.segs
[VCPU_SREG_FS
]);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Nothing to do if hardware doesn't support EFER. */
	if (!vmx_find_uret_msr(vmx, MSR_EFER))
		return 0;

	vcpu->arch.efer = efer;
#ifdef CONFIG_X86_64
	if (efer & EFER_LMA)
		vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
	else
		vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
#else
	if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
		return 1;
#endif

	vmx_setup_uret_msrs(vmx);
	return 0;
}
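/*
 * Illustrative sketch (not part of the driver): vmx_set_efer() keeps the
 * "IA-32e mode guest" VM-Entry control in sync with the guest's EFER.LMA.  A
 * hypothetical long-mode transition helper reduces to a single call:
 */
static inline int __example_enter_long_mode(struct kvm_vcpu *vcpu)
{
	/* Setting LMA flips VM_ENTRY_IA32E_MODE via vmx_set_efer(). */
	return vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}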
#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	vmx_segment_cache_clear(to_vmx(vcpu));

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
		pr_debug_ratelimited("%s: tss fixup for long mode.\n",
				     __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
			     | VMX_AR_TYPE_BUSY_64_TSS);
	}
	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}

#endif
static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
	 * the CPU is not required to invalidate guest-physical mappings on
	 * VM-Entry, even if VPID is disabled.  Guest-physical mappings are
	 * associated with the root EPT structure and not any particular VPID
	 * (INVVPID also isn't required to invalidate guest-physical mappings).
	 */
	if (enable_ept) {
		ept_sync_global();
	} else if (enable_vpid) {
		if (cpu_has_vmx_invvpid_global()) {
			vpid_sync_vcpu_global();
		} else {
			vpid_sync_vcpu_single(vmx->vpid);
			vpid_sync_vcpu_single(vmx->nested.vpid02);
		}
	}
}

static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu))
		return nested_get_vpid02(vcpu);
	return to_vmx(vcpu)->vpid;
}

static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u64 root_hpa = mmu->root.hpa;

	/* No flush required if the current context is invalid. */
	if (!VALID_PAGE(root_hpa))
		return;

	if (enable_ept)
		ept_sync_context(construct_eptp(vcpu, root_hpa,
						mmu->root_role.level));
	else
		vpid_sync_context(vmx_get_current_vpid(vcpu));
}

static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
	/*
	 * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
	 * vmx_flush_tlb_guest() for an explanation of why this is ok.
	 */
	vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
}

static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
	/*
	 * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
	 * vpid couldn't be allocated for this vCPU.  VM-Enter and VM-Exit are
	 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
	 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
	 * i.e. no explicit INVVPID is necessary.
	 */
	vpid_sync_context(vmx_get_current_vpid(vcpu));
}
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (!kvm_register_is_dirty(vcpu, VCPU_EXREG_PDPTR))
		return;

	if (is_pae_paging(vcpu)) {
		vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
		vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
		vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
		vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
	}
}

void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.walk_mmu;

	if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
		return;

	mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
	mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
	mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
	mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);

	kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
}
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
			  CPU_BASED_CR3_STORE_EXITING)

static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (is_guest_mode(vcpu))
		return nested_guest_cr0_valid(vcpu, cr0);

	if (to_vmx(vcpu)->nested.vmxon)
		return nested_host_cr0_valid(vcpu, cr0);

	return true;
}
3266 void vmx_set_cr0(struct kvm_vcpu
*vcpu
, unsigned long cr0
)
3268 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
3269 unsigned long hw_cr0
, old_cr0_pg
;
3272 old_cr0_pg
= kvm_read_cr0_bits(vcpu
, X86_CR0_PG
);
3274 hw_cr0
= (cr0
& ~KVM_VM_CR0_ALWAYS_OFF
);
3275 if (enable_unrestricted_guest
)
3276 hw_cr0
|= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST
;
3278 hw_cr0
|= KVM_VM_CR0_ALWAYS_ON
;
3280 hw_cr0
|= X86_CR0_WP
;
3282 if (vmx
->rmode
.vm86_active
&& (cr0
& X86_CR0_PE
))
3285 if (!vmx
->rmode
.vm86_active
&& !(cr0
& X86_CR0_PE
))
3289 vmcs_writel(CR0_READ_SHADOW
, cr0
);
3290 vmcs_writel(GUEST_CR0
, hw_cr0
);
3291 vcpu
->arch
.cr0
= cr0
;
3292 kvm_register_mark_available(vcpu
, VCPU_EXREG_CR0
);
3294 #ifdef CONFIG_X86_64
3295 if (vcpu
->arch
.efer
& EFER_LME
) {
3296 if (!old_cr0_pg
&& (cr0
& X86_CR0_PG
))
3298 else if (old_cr0_pg
&& !(cr0
& X86_CR0_PG
))
3303 if (enable_ept
&& !enable_unrestricted_guest
) {
3305 * Ensure KVM has an up-to-date snapshot of the guest's CR3. If
3306 * the below code _enables_ CR3 exiting, vmx_cache_reg() will
3307 * (correctly) stop reading vmcs.GUEST_CR3 because it thinks
3308 * KVM's CR3 is installed.
3310 if (!kvm_register_is_available(vcpu
, VCPU_EXREG_CR3
))
3311 vmx_cache_reg(vcpu
, VCPU_EXREG_CR3
);
3314 * When running with EPT but not unrestricted guest, KVM must
3315 * intercept CR3 accesses when paging is _disabled_. This is
3316 * necessary because restricted guests can't actually run with
3317 * paging disabled, and so KVM stuffs its own CR3 in order to
3318 * run the guest when identity mapped page tables.
3320 * Do _NOT_ check the old CR0.PG, e.g. to optimize away the
3321 * update, it may be stale with respect to CR3 interception,
3322 * e.g. after nested VM-Enter.
3324 * Lastly, honor L1's desires, i.e. intercept CR3 loads and/or
3325 * stores to forward them to L1, even if KVM does not need to
3326 * intercept them to preserve its identity mapped page tables.
3328 if (!(cr0
& X86_CR0_PG
)) {
3329 exec_controls_setbit(vmx
, CR3_EXITING_BITS
);
3330 } else if (!is_guest_mode(vcpu
)) {
3331 exec_controls_clearbit(vmx
, CR3_EXITING_BITS
);
3333 tmp
= exec_controls_get(vmx
);
3334 tmp
&= ~CR3_EXITING_BITS
;
3335 tmp
|= get_vmcs12(vcpu
)->cpu_based_vm_exec_control
& CR3_EXITING_BITS
;
3336 exec_controls_set(vmx
, tmp
);
3339 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3340 if ((old_cr0_pg
^ cr0
) & X86_CR0_PG
)
3341 vmx_set_cr4(vcpu
, kvm_read_cr4(vcpu
));
3344 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3345 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3347 if (!(old_cr0_pg
& X86_CR0_PG
) && (cr0
& X86_CR0_PG
))
3348 kvm_register_mark_dirty(vcpu
, VCPU_EXREG_CR3
);
3351 /* depends on vcpu->arch.cr0 to be set to a new value */
3352 vmx
->emulation_required
= vmx_emulation_required(vcpu
);
static int vmx_get_max_ept_level(void)
{
	if (cpu_has_vmx_ept_5levels())
		return 5;
	return 4;
}

u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
{
	u64 eptp = VMX_EPTP_MT_WB;

	eptp |= (root_level == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;

	if (enable_ept_ad_bits &&
	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
		eptp |= VMX_EPTP_AD_ENABLE_BIT;
	eptp |= root_hpa;

	return eptp;
}
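/*
 * Illustrative worked example (not part of the driver): with a hypothetical
 * 4-level root at physical address 0x1234000, construct_eptp() composes
 *
 *	eptp = VMX_EPTP_MT_WB | VMX_EPTP_PWL_4
 *	       | (VMX_EPTP_AD_ENABLE_BIT, if A/D bits are in use)
 *	       | 0x1234000
 *
 * i.e. memory type and page-walk length in the low bits and the page-aligned
 * root HPA in the upper bits.
 */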
static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
			     int root_level)
{
	struct kvm *kvm = vcpu->kvm;
	bool update_guest_cr3 = true;
	unsigned long guest_cr3;
	u64 eptp;

	if (enable_ept) {
		eptp = construct_eptp(vcpu, root_hpa, root_level);
		vmcs_write64(EPT_POINTER, eptp);

		hv_track_root_tdp(vcpu, root_hpa);

		if (!enable_unrestricted_guest && !is_paging(vcpu))
			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
		else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
			guest_cr3 = vcpu->arch.cr3;
		else /* vmcs.GUEST_CR3 is already up-to-date. */
			update_guest_cr3 = false;
		vmx_ept_load_pdptrs(vcpu);
	} else {
		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
			    kvm_get_active_cr3_lam_bits(vcpu);
	}

	if (update_guest_cr3)
		vmcs_writel(GUEST_CR3, guest_cr3);
}
static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	/*
	 * We operate under the default treatment of SMM, so VMX cannot be
	 * enabled under SMM.  Note, whether or not VMXE is allowed at all,
	 * i.e. is a reserved bit, is handled by common x86 code.
	 */
	if ((cr4 & X86_CR4_VMXE) && is_smm(vcpu))
		return false;

	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
		return false;

	return true;
}
3423 void vmx_set_cr4(struct kvm_vcpu
*vcpu
, unsigned long cr4
)
3425 unsigned long old_cr4
= kvm_read_cr4(vcpu
);
3426 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
3427 unsigned long hw_cr4
;
3430 * Pass through host's Machine Check Enable value to hw_cr4, which
3431 * is in force while we are in guest mode. Do not let guests control
3432 * this bit, even if host CR4.MCE == 0.
3434 hw_cr4
= (cr4_read_shadow() & X86_CR4_MCE
) | (cr4
& ~X86_CR4_MCE
);
3435 if (enable_unrestricted_guest
)
3436 hw_cr4
|= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST
;
3437 else if (vmx
->rmode
.vm86_active
)
3438 hw_cr4
|= KVM_RMODE_VM_CR4_ALWAYS_ON
;
3440 hw_cr4
|= KVM_PMODE_VM_CR4_ALWAYS_ON
;
3442 if (vmx_umip_emulated()) {
3443 if (cr4
& X86_CR4_UMIP
) {
3444 secondary_exec_controls_setbit(vmx
, SECONDARY_EXEC_DESC
);
3445 hw_cr4
&= ~X86_CR4_UMIP
;
3446 } else if (!is_guest_mode(vcpu
) ||
3447 !nested_cpu_has2(get_vmcs12(vcpu
), SECONDARY_EXEC_DESC
)) {
3448 secondary_exec_controls_clearbit(vmx
, SECONDARY_EXEC_DESC
);
3452 vcpu
->arch
.cr4
= cr4
;
3453 kvm_register_mark_available(vcpu
, VCPU_EXREG_CR4
);
3455 if (!enable_unrestricted_guest
) {
3457 if (!is_paging(vcpu
)) {
3458 hw_cr4
&= ~X86_CR4_PAE
;
3459 hw_cr4
|= X86_CR4_PSE
;
3460 } else if (!(cr4
& X86_CR4_PAE
)) {
3461 hw_cr4
&= ~X86_CR4_PAE
;
3466 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in
3467 * hardware. To emulate this behavior, SMEP/SMAP/PKU needs
3468 * to be manually disabled when guest switches to non-paging
3471 * If !enable_unrestricted_guest, the CPU is always running
3472 * with CR0.PG=1 and CR4 needs to be modified.
3473 * If enable_unrestricted_guest, the CPU automatically
3474 * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
3476 if (!is_paging(vcpu
))
3477 hw_cr4
&= ~(X86_CR4_SMEP
| X86_CR4_SMAP
| X86_CR4_PKE
);
3480 vmcs_writel(CR4_READ_SHADOW
, cr4
);
3481 vmcs_writel(GUEST_CR4
, hw_cr4
);
3483 if ((cr4
^ old_cr4
) & (X86_CR4_OSXSAVE
| X86_CR4_PKE
))
3484 kvm_update_cpuid_runtime(vcpu
);
3487 void vmx_get_segment(struct kvm_vcpu
*vcpu
, struct kvm_segment
*var
, int seg
)
3489 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
3492 if (vmx
->rmode
.vm86_active
&& seg
!= VCPU_SREG_LDTR
) {
3493 *var
= vmx
->rmode
.segs
[seg
];
3494 if (seg
== VCPU_SREG_TR
3495 || var
->selector
== vmx_read_guest_seg_selector(vmx
, seg
))
3497 var
->base
= vmx_read_guest_seg_base(vmx
, seg
);
3498 var
->selector
= vmx_read_guest_seg_selector(vmx
, seg
);
3501 var
->base
= vmx_read_guest_seg_base(vmx
, seg
);
3502 var
->limit
= vmx_read_guest_seg_limit(vmx
, seg
);
3503 var
->selector
= vmx_read_guest_seg_selector(vmx
, seg
);
3504 ar
= vmx_read_guest_seg_ar(vmx
, seg
);
3505 var
->unusable
= (ar
>> 16) & 1;
3506 var
->type
= ar
& 15;
3507 var
->s
= (ar
>> 4) & 1;
3508 var
->dpl
= (ar
>> 5) & 3;
3510 * Some userspaces do not preserve unusable property. Since usable
3511 * segment has to be present according to VMX spec we can use present
3512 * property to amend userspace bug by making unusable segment always
3513 * nonpresent. vmx_segment_access_rights() already marks nonpresent
3514 * segment as unusable.
3516 var
->present
= !var
->unusable
;
3517 var
->avl
= (ar
>> 12) & 1;
3518 var
->l
= (ar
>> 13) & 1;
3519 var
->db
= (ar
>> 14) & 1;
3520 var
->g
= (ar
>> 15) & 1;
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment s;

	if (to_vmx(vcpu)->rmode.vm86_active) {
		vmx_get_segment(vcpu, &s, seg);
		return s.base;
	}
	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}

int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (unlikely(vmx->rmode.vm86_active))
		return 0;
	else {
		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);

		return VMX_AR_DPL(ar);
	}
}
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
	u32 ar;

	ar = var->type & 15;
	ar |= (var->s & 1) << 4;
	ar |= (var->dpl & 3) << 5;
	ar |= (var->present & 1) << 7;
	ar |= (var->avl & 1) << 12;
	ar |= (var->l & 1) << 13;
	ar |= (var->db & 1) << 14;
	ar |= (var->g & 1) << 15;
	ar |= (var->unusable || !var->present) << 16;

	return ar;
}
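/*
 * Illustrative worked example (not part of the driver): for a flat 32-bit
 * code segment (type=0xb, s=1, dpl=0, present=1, avl=0, l=0, db=1, g=1,
 * usable), the packing above yields
 *
 *	ar = 0xb | (1 << 4) | (1 << 7) | (1 << 14) | (1 << 15) = 0xc09b
 *
 * which is the access-rights value written to the GUEST_*_AR_BYTES fields.
 */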
3563 void __vmx_set_segment(struct kvm_vcpu
*vcpu
, struct kvm_segment
*var
, int seg
)
3565 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
3566 const struct kvm_vmx_segment_field
*sf
= &kvm_vmx_segment_fields
[seg
];
3568 vmx_segment_cache_clear(vmx
);
3570 if (vmx
->rmode
.vm86_active
&& seg
!= VCPU_SREG_LDTR
) {
3571 vmx
->rmode
.segs
[seg
] = *var
;
3572 if (seg
== VCPU_SREG_TR
)
3573 vmcs_write16(sf
->selector
, var
->selector
);
3575 fix_rmode_seg(seg
, &vmx
->rmode
.segs
[seg
]);
3579 vmcs_writel(sf
->base
, var
->base
);
3580 vmcs_write32(sf
->limit
, var
->limit
);
3581 vmcs_write16(sf
->selector
, var
->selector
);
3584 * Fix the "Accessed" bit in AR field of segment registers for older
3586 * IA32 arch specifies that at the time of processor reset the
3587 * "Accessed" bit in the AR field of segment registers is 1. And qemu
3588 * is setting it to 0 in the userland code. This causes invalid guest
3589 * state vmexit when "unrestricted guest" mode is turned on.
3590 * Fix for this setup issue in cpu_reset is being pushed in the qemu
3591 * tree. Newer qemu binaries with that qemu fix would not need this
3594 if (is_unrestricted_guest(vcpu
) && (seg
!= VCPU_SREG_LDTR
))
3595 var
->type
|= 0x1; /* Accessed */
3597 vmcs_write32(sf
->ar_bytes
, vmx_segment_access_rights(var
));
3600 static void vmx_set_segment(struct kvm_vcpu
*vcpu
, struct kvm_segment
*var
, int seg
)
3602 __vmx_set_segment(vcpu
, var
, seg
);
3604 to_vmx(vcpu
)->emulation_required
= vmx_emulation_required(vcpu
);
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);

	*db = (ar >> 14) & 1;
	*l = (ar >> 13) & 1;
}

static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_IDTR_BASE);
}

static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_IDTR_BASE, dt->address);
}

static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
	dt->address = vmcs_readl(GUEST_GDTR_BASE);
}

static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
	vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
	vmcs_writel(GUEST_GDTR_BASE, dt->address);
}
3639 static bool rmode_segment_valid(struct kvm_vcpu
*vcpu
, int seg
)
3641 struct kvm_segment var
;
3644 vmx_get_segment(vcpu
, &var
, seg
);
3646 if (seg
== VCPU_SREG_CS
)
3648 ar
= vmx_segment_access_rights(&var
);
3650 if (var
.base
!= (var
.selector
<< 4))
3652 if (var
.limit
!= 0xffff)
3660 static bool code_segment_valid(struct kvm_vcpu
*vcpu
)
3662 struct kvm_segment cs
;
3663 unsigned int cs_rpl
;
3665 vmx_get_segment(vcpu
, &cs
, VCPU_SREG_CS
);
3666 cs_rpl
= cs
.selector
& SEGMENT_RPL_MASK
;
3670 if (~cs
.type
& (VMX_AR_TYPE_CODE_MASK
|VMX_AR_TYPE_ACCESSES_MASK
))
3674 if (cs
.type
& VMX_AR_TYPE_WRITEABLE_MASK
) {
3675 if (cs
.dpl
> cs_rpl
)
3678 if (cs
.dpl
!= cs_rpl
)
3684 /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
3688 static bool stack_segment_valid(struct kvm_vcpu
*vcpu
)
3690 struct kvm_segment ss
;
3691 unsigned int ss_rpl
;
3693 vmx_get_segment(vcpu
, &ss
, VCPU_SREG_SS
);
3694 ss_rpl
= ss
.selector
& SEGMENT_RPL_MASK
;
3698 if (ss
.type
!= 3 && ss
.type
!= 7)
3702 if (ss
.dpl
!= ss_rpl
) /* DPL != RPL */
3710 static bool data_segment_valid(struct kvm_vcpu
*vcpu
, int seg
)
3712 struct kvm_segment var
;
3715 vmx_get_segment(vcpu
, &var
, seg
);
3716 rpl
= var
.selector
& SEGMENT_RPL_MASK
;
3724 if (~var
.type
& (VMX_AR_TYPE_CODE_MASK
|VMX_AR_TYPE_WRITEABLE_MASK
)) {
3725 if (var
.dpl
< rpl
) /* DPL < RPL */
3729 /* TODO: Add other members to kvm_segment_field to allow checking for other access
3735 static bool tr_valid(struct kvm_vcpu
*vcpu
)
3737 struct kvm_segment tr
;
3739 vmx_get_segment(vcpu
, &tr
, VCPU_SREG_TR
);
3743 if (tr
.selector
& SEGMENT_TI_MASK
) /* TI = 1 */
3745 if (tr
.type
!= 3 && tr
.type
!= 11) /* TODO: Check if guest is in IA32e mode */
3753 static bool ldtr_valid(struct kvm_vcpu
*vcpu
)
3755 struct kvm_segment ldtr
;
3757 vmx_get_segment(vcpu
, &ldtr
, VCPU_SREG_LDTR
);
3761 if (ldtr
.selector
& SEGMENT_TI_MASK
) /* TI = 1 */
3771 static bool cs_ss_rpl_check(struct kvm_vcpu
*vcpu
)
3773 struct kvm_segment cs
, ss
;
3775 vmx_get_segment(vcpu
, &cs
, VCPU_SREG_CS
);
3776 vmx_get_segment(vcpu
, &ss
, VCPU_SREG_SS
);
3778 return ((cs
.selector
& SEGMENT_RPL_MASK
) ==
3779 (ss
.selector
& SEGMENT_RPL_MASK
));
3783 * Check if guest state is valid. Returns true if valid, false if
3785 * We assume that registers are always usable
3787 bool __vmx_guest_state_valid(struct kvm_vcpu
*vcpu
)
3789 /* real mode guest state checks */
3790 if (!is_protmode(vcpu
) || (vmx_get_rflags(vcpu
) & X86_EFLAGS_VM
)) {
3791 if (!rmode_segment_valid(vcpu
, VCPU_SREG_CS
))
3793 if (!rmode_segment_valid(vcpu
, VCPU_SREG_SS
))
3795 if (!rmode_segment_valid(vcpu
, VCPU_SREG_DS
))
3797 if (!rmode_segment_valid(vcpu
, VCPU_SREG_ES
))
3799 if (!rmode_segment_valid(vcpu
, VCPU_SREG_FS
))
3801 if (!rmode_segment_valid(vcpu
, VCPU_SREG_GS
))
3804 /* protected mode guest state checks */
3805 if (!cs_ss_rpl_check(vcpu
))
3807 if (!code_segment_valid(vcpu
))
3809 if (!stack_segment_valid(vcpu
))
3811 if (!data_segment_valid(vcpu
, VCPU_SREG_DS
))
3813 if (!data_segment_valid(vcpu
, VCPU_SREG_ES
))
3815 if (!data_segment_valid(vcpu
, VCPU_SREG_FS
))
3817 if (!data_segment_valid(vcpu
, VCPU_SREG_GS
))
3819 if (!tr_valid(vcpu
))
3821 if (!ldtr_valid(vcpu
))
3825 * - Add checks on RIP
3826 * - Add checks on RFLAGS
3832 static int init_rmode_tss(struct kvm
*kvm
, void __user
*ua
)
3834 const void *zero_page
= (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3838 for (i
= 0; i
< 3; i
++) {
3839 if (__copy_to_user(ua
+ PAGE_SIZE
* i
, zero_page
, PAGE_SIZE
))
3843 data
= TSS_BASE_SIZE
+ TSS_REDIRECTION_SIZE
;
3844 if (__copy_to_user(ua
+ TSS_IOPB_BASE_OFFSET
, &data
, sizeof(u16
)))
3848 if (__copy_to_user(ua
+ RMODE_TSS_SIZE
- 1, &data
, sizeof(u8
)))
3854 static int init_rmode_identity_map(struct kvm
*kvm
)
3856 struct kvm_vmx
*kvm_vmx
= to_kvm_vmx(kvm
);
3861 /* Protect kvm_vmx->ept_identity_pagetable_done. */
3862 mutex_lock(&kvm
->slots_lock
);
3864 if (likely(kvm_vmx
->ept_identity_pagetable_done
))
3867 if (!kvm_vmx
->ept_identity_map_addr
)
3868 kvm_vmx
->ept_identity_map_addr
= VMX_EPT_IDENTITY_PAGETABLE_ADDR
;
3870 uaddr
= __x86_set_memory_region(kvm
,
3871 IDENTITY_PAGETABLE_PRIVATE_MEMSLOT
,
3872 kvm_vmx
->ept_identity_map_addr
,
3874 if (IS_ERR(uaddr
)) {
3879 /* Set up identity-mapping pagetable for EPT in real mode */
3880 for (i
= 0; i
< (PAGE_SIZE
/ sizeof(tmp
)); i
++) {
3881 tmp
= (i
<< 22) + (_PAGE_PRESENT
| _PAGE_RW
| _PAGE_USER
|
3882 _PAGE_ACCESSED
| _PAGE_DIRTY
| _PAGE_PSE
);
3883 if (__copy_to_user(uaddr
+ i
* sizeof(tmp
), &tmp
, sizeof(tmp
))) {
3888 kvm_vmx
->ept_identity_pagetable_done
= true;
3891 mutex_unlock(&kvm
->slots_lock
);
static void seg_setup(int seg)
{
	const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	unsigned int ar;

	vmcs_write16(sf->selector, 0);
	vmcs_writel(sf->base, 0);
	vmcs_write32(sf->limit, 0xffff);
	ar = 0x93;
	if (seg == VCPU_SREG_CS)
		ar |= 0x08; /* code segment */

	vmcs_write32(sf->ar_bytes, ar);
}

int allocate_vpid(void)
{
	int vpid;

	if (!enable_vpid)
		return 0;
	spin_lock(&vmx_vpid_lock);
	vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
	if (vpid < VMX_NR_VPIDS)
		__set_bit(vpid, vmx_vpid_bitmap);
	else
		vpid = 0;
	spin_unlock(&vmx_vpid_lock);
	return vpid;
}

void free_vpid(int vpid)
{
	if (!enable_vpid || vpid == 0)
		return;
	spin_lock(&vmx_vpid_lock);
	__clear_bit(vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
}
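/*
 * Illustrative sketch (not part of the driver): allocate_vpid()/free_vpid()
 * hand out TLB tags from a global bitmap; VPID 0 is reserved, so a return
 * value of 0 means "disabled or exhausted" and the vCPU simply runs untagged.
 * A hypothetical caller:
 */
static inline void __example_vpid_lifetime(void)
{
	int vpid = allocate_vpid();	/* 0 if disabled or exhausted */

	/* ... use the tag for the vCPU's lifetime ... */

	free_vpid(vpid);		/* no-op for vpid == 0 */
}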
static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
{
	/*
	 * When KVM is a nested hypervisor on top of Hyper-V and uses
	 * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
	 * bitmap has changed.
	 */
	if (kvm_is_using_evmcs()) {
		struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;

		if (evmcs->hv_enlightenments_control.msr_bitmap)
			evmcs->hv_clean_fields &=
				~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
	}

	vmx->nested.force_msr_bitmap_recalc = true;
}
3953 void vmx_disable_intercept_for_msr(struct kvm_vcpu
*vcpu
, u32 msr
, int type
)
3955 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
3956 unsigned long *msr_bitmap
= vmx
->vmcs01
.msr_bitmap
;
3958 if (!cpu_has_vmx_msr_bitmap())
3961 vmx_msr_bitmap_l01_changed(vmx
);
3964 * Mark the desired intercept state in shadow bitmap, this is needed
3965 * for resync when the MSR filters change.
3967 if (is_valid_passthrough_msr(msr
)) {
3968 int idx
= possible_passthrough_msr_slot(msr
);
3970 if (idx
!= -ENOENT
) {
3971 if (type
& MSR_TYPE_R
)
3972 clear_bit(idx
, vmx
->shadow_msr_intercept
.read
);
3973 if (type
& MSR_TYPE_W
)
3974 clear_bit(idx
, vmx
->shadow_msr_intercept
.write
);
3978 if ((type
& MSR_TYPE_R
) &&
3979 !kvm_msr_allowed(vcpu
, msr
, KVM_MSR_FILTER_READ
)) {
3980 vmx_set_msr_bitmap_read(msr_bitmap
, msr
);
3981 type
&= ~MSR_TYPE_R
;
3984 if ((type
& MSR_TYPE_W
) &&
3985 !kvm_msr_allowed(vcpu
, msr
, KVM_MSR_FILTER_WRITE
)) {
3986 vmx_set_msr_bitmap_write(msr_bitmap
, msr
);
3987 type
&= ~MSR_TYPE_W
;
3990 if (type
& MSR_TYPE_R
)
3991 vmx_clear_msr_bitmap_read(msr_bitmap
, msr
);
3993 if (type
& MSR_TYPE_W
)
3994 vmx_clear_msr_bitmap_write(msr_bitmap
, msr
);
3997 void vmx_enable_intercept_for_msr(struct kvm_vcpu
*vcpu
, u32 msr
, int type
)
3999 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
4000 unsigned long *msr_bitmap
= vmx
->vmcs01
.msr_bitmap
;
4002 if (!cpu_has_vmx_msr_bitmap())
4005 vmx_msr_bitmap_l01_changed(vmx
);
4008 * Mark the desired intercept state in shadow bitmap, this is needed
4009 * for resync when the MSR filter changes.
4011 if (is_valid_passthrough_msr(msr
)) {
4012 int idx
= possible_passthrough_msr_slot(msr
);
4014 if (idx
!= -ENOENT
) {
4015 if (type
& MSR_TYPE_R
)
4016 set_bit(idx
, vmx
->shadow_msr_intercept
.read
);
4017 if (type
& MSR_TYPE_W
)
4018 set_bit(idx
, vmx
->shadow_msr_intercept
.write
);
4022 if (type
& MSR_TYPE_R
)
4023 vmx_set_msr_bitmap_read(msr_bitmap
, msr
);
4025 if (type
& MSR_TYPE_W
)
4026 vmx_set_msr_bitmap_write(msr_bitmap
, msr
);
4029 static void vmx_update_msr_bitmap_x2apic(struct kvm_vcpu
*vcpu
)
4032 * x2APIC indices for 64-bit accesses into the RDMSR and WRMSR halves
4033 * of the MSR bitmap. KVM emulates APIC registers up through 0x3f0,
4034 * i.e. MSR 0x83f, and so only needs to dynamically manipulate 64 bits.
4036 const int read_idx
= APIC_BASE_MSR
/ BITS_PER_LONG_LONG
;
4037 const int write_idx
= read_idx
+ (0x800 / sizeof(u64
));
4038 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
4039 u64
*msr_bitmap
= (u64
*)vmx
->vmcs01
.msr_bitmap
;
4042 if (!cpu_has_vmx_msr_bitmap() || WARN_ON_ONCE(!lapic_in_kernel(vcpu
)))
4045 if (cpu_has_secondary_exec_ctrls() &&
4046 (secondary_exec_controls_get(vmx
) &
4047 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE
)) {
4048 mode
= MSR_BITMAP_MODE_X2APIC
;
4049 if (enable_apicv
&& kvm_vcpu_apicv_active(vcpu
))
4050 mode
|= MSR_BITMAP_MODE_X2APIC_APICV
;
4055 if (mode
== vmx
->x2apic_msr_bitmap_mode
)
4058 vmx
->x2apic_msr_bitmap_mode
= mode
;
4061 * Reset the bitmap for MSRs 0x800 - 0x83f. Leave AMD's uber-extended
4062 * registers (0x840 and above) intercepted, KVM doesn't support them.
4063 * Intercept all writes by default and poke holes as needed. Pass
4064 * through reads for all valid registers by default in x2APIC+APICv
4065 * mode, only the current timer count needs on-demand emulation by KVM.
4067 if (mode
& MSR_BITMAP_MODE_X2APIC_APICV
)
4068 msr_bitmap
[read_idx
] = ~kvm_lapic_readable_reg_mask(vcpu
->arch
.apic
);
4070 msr_bitmap
[read_idx
] = ~0ull;
4071 msr_bitmap
[write_idx
] = ~0ull;
4074 * TPR reads and writes can be virtualized even if virtual interrupt
4075 * delivery is not in use.
4077 vmx_set_intercept_for_msr(vcpu
, X2APIC_MSR(APIC_TASKPRI
), MSR_TYPE_RW
,
4078 !(mode
& MSR_BITMAP_MODE_X2APIC
));
4080 if (mode
& MSR_BITMAP_MODE_X2APIC_APICV
) {
4081 vmx_enable_intercept_for_msr(vcpu
, X2APIC_MSR(APIC_TMCCT
), MSR_TYPE_RW
);
4082 vmx_disable_intercept_for_msr(vcpu
, X2APIC_MSR(APIC_EOI
), MSR_TYPE_W
);
4083 vmx_disable_intercept_for_msr(vcpu
, X2APIC_MSR(APIC_SELF_IPI
), MSR_TYPE_W
);
4085 vmx_disable_intercept_for_msr(vcpu
, X2APIC_MSR(APIC_ICR
), MSR_TYPE_RW
);
4089 void pt_update_intercept_for_msr(struct kvm_vcpu
*vcpu
)
4091 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
4092 bool flag
= !(vmx
->pt_desc
.guest
.ctl
& RTIT_CTL_TRACEEN
);
4095 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_RTIT_STATUS
, MSR_TYPE_RW
, flag
);
4096 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_RTIT_OUTPUT_BASE
, MSR_TYPE_RW
, flag
);
4097 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_RTIT_OUTPUT_MASK
, MSR_TYPE_RW
, flag
);
4098 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_RTIT_CR3_MATCH
, MSR_TYPE_RW
, flag
);
4099 for (i
= 0; i
< vmx
->pt_desc
.num_address_ranges
; i
++) {
4100 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_RTIT_ADDR0_A
+ i
* 2, MSR_TYPE_RW
, flag
);
4101 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_RTIT_ADDR0_B
+ i
* 2, MSR_TYPE_RW
, flag
);
4105 static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu
*vcpu
)
4107 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
4112 if (WARN_ON_ONCE(!is_guest_mode(vcpu
)) ||
4113 !nested_cpu_has_vid(get_vmcs12(vcpu
)) ||
4114 WARN_ON_ONCE(!vmx
->nested
.virtual_apic_map
.gfn
))
4117 rvi
= vmx_get_rvi();
4119 vapic_page
= vmx
->nested
.virtual_apic_map
.hva
;
4120 vppr
= *((u32
*)(vapic_page
+ APIC_PROCPRI
));
4122 return ((rvi
& 0xf0) > (vppr
& 0xf0));
4125 static void vmx_msr_filter_changed(struct kvm_vcpu
*vcpu
)
4127 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
4131 * Redo intercept permissions for MSRs that KVM is passing through to
4132 * the guest. Disabling interception will check the new MSR filter and
4133 * ensure that KVM enables interception if usersepace wants to filter
4134 * the MSR. MSRs that KVM is already intercepting don't need to be
4135 * refreshed since KVM is going to intercept them regardless of what
4138 for (i
= 0; i
< ARRAY_SIZE(vmx_possible_passthrough_msrs
); i
++) {
4139 u32 msr
= vmx_possible_passthrough_msrs
[i
];
4141 if (!test_bit(i
, vmx
->shadow_msr_intercept
.read
))
4142 vmx_disable_intercept_for_msr(vcpu
, msr
, MSR_TYPE_R
);
4144 if (!test_bit(i
, vmx
->shadow_msr_intercept
.write
))
4145 vmx_disable_intercept_for_msr(vcpu
, msr
, MSR_TYPE_W
);
4148 /* PT MSRs can be passed through iff PT is exposed to the guest. */
4149 if (vmx_pt_mode_is_host_guest())
4150 pt_update_intercept_for_msr(vcpu
);
static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
						     int pi_vec)
{
#ifdef CONFIG_SMP
	if (vcpu->mode == IN_GUEST_MODE) {
		/*
		 * The vector of the virtual has already been set in the PIR.
		 * Send a notification event to deliver the virtual interrupt
		 * unless the vCPU is the currently running vCPU, i.e. the
		 * event is being sent from a fastpath VM-Exit handler, in
		 * which case the PIR will be synced to the vIRR before
		 * re-entering the guest.
		 *
		 * When the target is not the running vCPU, the following
		 * possibilities emerge:
		 *
		 * Case 1: vCPU stays in non-root mode.  Sending a notification
		 * event posts the interrupt to the vCPU.
		 *
		 * Case 2: vCPU exits to root mode and is still runnable.  The
		 * PIR will be synced to the vIRR before re-entering the guest.
		 * Sending a notification event is ok as the host IRQ handler
		 * will ignore the spurious event.
		 *
		 * Case 3: vCPU exits to root mode and is blocked.  vcpu_block()
		 * has already synced PIR to vIRR and never blocks the vCPU if
		 * the vIRR is not empty.  Therefore, a blocked vCPU here does
		 * not wait for any requested interrupts in PIR, and sending a
		 * notification event also results in a benign, spurious event.
		 */

		if (vcpu != kvm_get_running_vcpu())
			__apic_send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
		return;
	}
#endif
	/*
	 * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
	 * otherwise do nothing as KVM will grab the highest priority pending
	 * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
	 */
	kvm_vcpu_wake_up(vcpu);
}
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
					       int vector)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (is_guest_mode(vcpu) &&
	    vector == vmx->nested.posted_intr_nv) {
		/*
		 * If a posted intr is not recognized by hardware,
		 * we will accomplish it in the next vmentry.
		 */
		vmx->nested.pi_pending = true;
		kvm_make_request(KVM_REQ_EVENT, vcpu);

		/*
		 * This pairs with the smp_mb_*() after setting vcpu->mode in
		 * vcpu_enter_guest() to guarantee the vCPU sees the event
		 * request if triggering a posted interrupt "fails" because
		 * vcpu->mode != IN_GUEST_MODE.  The extra barrier is needed as
		 * the smp_wmb() in kvm_make_request() only ensures everything
		 * done before making the request is visible when the request
		 * is visible, it doesn't ensure ordering between the store to
		 * vcpu->requests and the load from vcpu->mode.
		 */
		smp_mb__after_atomic();

		/* the PIR and ON have been set by L1. */
		kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
		return 0;
	}
	return -1;
}
 * Send an interrupt to the vCPU via posted interrupt.
 * 1. If the target vCPU is running (non-root mode), send a posted interrupt
 *    notification and hardware will sync the PIR to the vIRR atomically.
 * 2. If the target vCPU isn't running (root mode), kick it so that it picks
 *    up the interrupt from the PIR on the next VM-Entry.
 */
static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int r;

	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
	if (!r)
		return 0;

	/* Note, this is called iff the local APIC is in-kernel. */
	if (!vcpu->arch.apic->apicv_active)
		return -1;

	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
		return 0;

	/* If a previous notification has sent the IPI, nothing to do.  */
	if (pi_test_and_set_on(&vmx->pi_desc))
		return 0;

	/*
	 * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
	 * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
	 * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
	 * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
	 */
	kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
	return 0;
}
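
/*
 * If posted-interrupt delivery isn't possible (e.g. APICv is inactive for
 * this vCPU), fall back to setting the vIRR manually and kicking the vCPU so
 * the interrupt is picked up on the next VM-Entry.
 */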
static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
				  int trig_mode, int vector)
{
	struct kvm_vcpu *vcpu = apic->vcpu;

	if (vmx_deliver_posted_interrupt(vcpu, vector)) {
		kvm_lapic_set_irr(vector, apic);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
	} else {
		trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
					   trig_mode, vector);
	}
}
 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
 * will not change in the lifetime of the guest.
 * Note that host-state that does change is set elsewhere. E.g., host-state
 * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
 */
void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
{
	u32 low32, high32;
	unsigned long tmpl;
	unsigned long cr0, cr3, cr4;

	cr0 = read_cr0();
	WARN_ON(cr0 & X86_CR0_TS);
	vmcs_writel(HOST_CR0, cr0);  /* 22.2.3 */

	/*
	 * Save the most likely value for this task's CR3 in the VMCS.
	 * We can't use __get_current_cr3_fast() because we're not atomic.
	 */
	cr3 = __read_cr3();
	vmcs_writel(HOST_CR3, cr3);		/* 22.2.3  FIXME: shadow tables */
	vmx->loaded_vmcs->host_state.cr3 = cr3;

	/* Save the most likely value for this task's CR4 in the VMCS. */
	cr4 = cr4_read_shadow();
	vmcs_writel(HOST_CR4, cr4);			/* 22.2.3, 22.2.5 */
	vmx->loaded_vmcs->host_state.cr4 = cr4;

	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
#ifdef CONFIG_X86_64
	/*
	 * Load null selectors, so we can avoid reloading them in
	 * vmx_prepare_switch_to_host(), in case userspace uses
	 * the null selectors too (the expected case).
	 */
	vmcs_write16(HOST_DS_SELECTOR, 0);
	vmcs_write16(HOST_ES_SELECTOR, 0);
#else
	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
#endif
	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
	vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */

	vmcs_writel(HOST_IDTR_BASE, host_idt_base);   /* 22.2.4 */

	vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */

	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);

	/*
	 * SYSENTER is used for 32-bit system calls on either 32-bit or
	 * 64-bit kernels.  It is always zero if neither is allowed, otherwise
	 * vmx_vcpu_load_vmcs loads it with the per-CPU entry stack (and may
	 * have already done so!).
	 */
	if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
		vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);

	rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
	vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */

	if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
		rdmsr(MSR_IA32_CR_PAT, low32, high32);
		vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
	}

	if (cpu_has_load_ia32_efer())
		vmcs_write64(HOST_IA32_EFER, host_efer);
}
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
					  ~vcpu->arch.cr4_guest_rsvd_bits;
	if (!enable_ept) {
		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
		vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
	}
	if (is_guest_mode(&vmx->vcpu))
		vcpu->arch.cr4_guest_owned_bits &=
			~get_vmcs12(vcpu)->cr4_guest_host_mask;
	vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
}
static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
{
	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;

	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;

	if (!enable_vnmi)
		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;

	if (!enable_preemption_timer)
		pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;

	return pin_based_exec_ctrl;
}

static u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;

	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);

	/*
	 * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
	 */
	vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
			  VM_ENTRY_LOAD_IA32_EFER |
			  VM_ENTRY_IA32E_MODE);

	if (cpu_has_perf_global_ctrl_bug())
		vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;

	return vmentry_ctrl;
}
static u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;

	/*
	 * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
	 * nested virtualization and thus allowed to be set in vmcs12.
	 */
	vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
			 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);

	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);

	if (cpu_has_perf_global_ctrl_bug())
		vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;

	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}
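
/*
 * Toggle the APICv-related execution controls when APICv is activated or
 * inhibited for the vCPU.  While L2 is running only a flag is recorded; the
 * vmcs01 controls are refreshed after the switch back to L1.
 */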
static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (is_guest_mode(vcpu)) {
		vmx->nested.update_vmcs01_apicv_status = true;
		return;
	}

	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));

	if (kvm_vcpu_apicv_active(vcpu)) {
		secondary_exec_controls_setbit(vmx,
					       SECONDARY_EXEC_APIC_REGISTER_VIRT |
					       SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
		if (enable_ipiv)
			tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
	} else {
		secondary_exec_controls_clearbit(vmx,
						 SECONDARY_EXEC_APIC_REGISTER_VIRT |
						 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
		if (enable_ipiv)
			tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
	}

	vmx_update_msr_bitmap_x2apic(vcpu);
}
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;

	/*
	 * Not used by KVM, but fully supported for nesting, i.e. are allowed in
	 * vmcs12 and propagated to vmcs02 when set in vmcs12.
	 */
	exec_control &= ~(CPU_BASED_RDTSC_EXITING |
			  CPU_BASED_USE_IO_BITMAPS |
			  CPU_BASED_MONITOR_TRAP_FLAG |
			  CPU_BASED_PAUSE_EXITING);

	/* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
	exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
			  CPU_BASED_NMI_WINDOW_EXITING);

	if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
		exec_control &= ~CPU_BASED_MOV_DR_EXITING;

	if (!cpu_need_tpr_shadow(&vmx->vcpu))
		exec_control &= ~CPU_BASED_TPR_SHADOW;

#ifdef CONFIG_X86_64
	if (exec_control & CPU_BASED_TPR_SHADOW)
		exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
				  CPU_BASED_CR8_STORE_EXITING);
	else
		exec_control |= CPU_BASED_CR8_STORE_EXITING |
				CPU_BASED_CR8_LOAD_EXITING;
#endif
	/* No need to intercept CR3 access or INVLPG when using EPT. */
	if (enable_ept)
		exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
				  CPU_BASED_CR3_STORE_EXITING |
				  CPU_BASED_INVLPG_EXITING);
	if (kvm_mwait_in_guest(vmx->vcpu.kvm))
		exec_control &= ~(CPU_BASED_MWAIT_EXITING |
				  CPU_BASED_MONITOR_EXITING);
	if (kvm_hlt_in_guest(vmx->vcpu.kvm))
		exec_control &= ~CPU_BASED_HLT_EXITING;
	return exec_control;
}
static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
{
	u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;

	/*
	 * IPI virtualization relies on APICv. Disable IPI virtualization if
	 * APICv is inhibited.
	 */
	if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
		exec_control &= ~TERTIARY_EXEC_IPI_VIRT;

	return exec_control;
}
 * Adjust a single secondary execution control bit to intercept/allow an
 * instruction in the guest.  This is usually done based on whether or not a
 * feature has been exposed to the guest in order to correctly emulate faults.
 */
static inline void
vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
				  u32 control, bool enabled, bool exiting)
{
	/*
	 * If the control is for an opt-in feature, clear the control if the
	 * feature is not exposed to the guest, i.e. not enabled.  If the
	 * control is opt-out, i.e. an exiting control, clear the control if
	 * the feature _is_ exposed to the guest, i.e. exiting/interception is
	 * disabled for the associated instruction.  Note, the caller is
	 * responsible for presetting exec_control to set all supported bits.
	 */
	if (enabled == exiting)
		*exec_control &= ~control;

	/*
	 * Update the nested MSR settings so that a nested VMM can/can't set
	 * controls for features that are/aren't exposed to the guest.
	 */
	if (nested) {
		/*
		 * All features that can be added or removed to VMX MSRs must
		 * be supported in the first place for nested virtualization.
		 */
		if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
			enabled = false;

		if (enabled)
			vmx->nested.msrs.secondary_ctls_high |= control;
		else
			vmx->nested.msrs.secondary_ctls_high &= ~control;
	}
}

/*
 * Wrapper macro for the common case of adjusting a secondary execution control
 * based on a single guest CPUID bit, with a dedicated feature bit.  This also
 * verifies that the control is actually supported by KVM and hardware.
 */
#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting)	\
({												\
	struct kvm_vcpu *__vcpu = &(vmx)->vcpu;							\
	bool __enabled;										\
												\
	if (cpu_has_vmx_##name()) {								\
		if (kvm_is_governed_feature(X86_FEATURE_##feat_name))				\
			__enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name);		\
		else										\
			__enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name);		\
		vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
						  __enabled, exiting);				\
	}											\
})

/* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
#define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname)			\
	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)

#define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
	vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
	struct kvm_vcpu *vcpu = &vmx->vcpu;

	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;

	if (vmx_pt_mode_is_system())
		exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
	if (!cpu_need_virtualize_apic_accesses(vcpu))
		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	if (vmx->vpid == 0)
		exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
	if (!enable_ept) {
		exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
		enable_unrestricted_guest = 0;
	}
	if (!enable_unrestricted_guest)
		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
	if (kvm_pause_in_guest(vmx->vcpu.kvm))
		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
	if (!kvm_vcpu_apicv_active(vcpu))
		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;

	/*
	 * KVM doesn't support VMFUNC for L1, but the control is set in KVM's
	 * base configuration as KVM emulates VMFUNC[EPTP_SWITCHING] for L2.
	 */
	exec_control &= ~SECONDARY_EXEC_ENABLE_VMFUNC;

	/* SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
	 * in vmx_set_cr4.  */
	exec_control &= ~SECONDARY_EXEC_DESC;

	/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
	   (handle_vmptrld).
	   We can NOT enable shadow_vmcs here because we don't yet have
	   a current VMCS12.
	*/
	exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;

	/*
	 * PML is enabled/disabled when dirty logging of memslots changes, but
	 * it needs to be set here when dirty logging is already active, e.g.
	 * if this vCPU was created after dirty logging was enabled.
	 */
	if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
		exec_control &= ~SECONDARY_EXEC_ENABLE_PML;

	vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);

	/*
	 * RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
	 * feature is exposed to the guest.  This creates a virtualization hole
	 * if both are supported in hardware but only one is exposed to the
	 * guest, but letting the guest execute RDTSCP or RDPID when either one
	 * is advertised is preferable to emulating the advertised instruction
	 * in KVM on #UD, and obviously better than incorrectly injecting #UD.
	 */
	if (cpu_has_vmx_rdtscp()) {
		bool rdpid_or_rdtscp_enabled =
			guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
			guest_cpuid_has(vcpu, X86_FEATURE_RDPID);

		vmx_adjust_secondary_exec_control(vmx, &exec_control,
						  SECONDARY_EXEC_ENABLE_RDTSCP,
						  rdpid_or_rdtscp_enabled, false);
	}

	vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);

	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
	vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);

	vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
				    ENABLE_USR_WAIT_PAUSE, false);

	if (!vcpu->kvm->arch.bus_lock_detection_enabled)
		exec_control &= ~SECONDARY_EXEC_BUS_LOCK_DETECTION;

	if (!kvm_notify_vmexit_enabled(vcpu->kvm))
		exec_control &= ~SECONDARY_EXEC_NOTIFY_VM_EXITING;

	return exec_control;
}
static inline int vmx_get_pid_table_order(struct kvm *kvm)
{
	return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
}

static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
{
	struct page *pages;
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);

	if (!irqchip_in_kernel(kvm) || !enable_ipiv)
		return 0;

	if (kvm_vmx->pid_table)
		return 0;

	pages = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
			    vmx_get_pid_table_order(kvm));
	if (!pages)
		return -ENOMEM;

	kvm_vmx->pid_table = (void *)page_address(pages);
	return 0;
}

static int vmx_vcpu_precreate(struct kvm *kvm)
{
	return vmx_alloc_ipiv_pid_table(kvm);
}
#define VMX_XSS_EXIT_BITMAP 0
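
/*
 * Program vmcs01 with KVM's baseline configuration: VM-execution, VM-entry
 * and VM-exit controls, the constant host state, and initial values for a
 * handful of guest fields.
 */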
static void init_vmcs(struct vcpu_vmx *vmx)
{
	struct kvm *kvm = vmx->vcpu.kvm;
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);

	if (nested)
		nested_vmx_set_vmcs_shadowing_bitmap();

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));

	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); /* 22.3.1.5 */

	/* Control */
	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));

	exec_controls_set(vmx, vmx_exec_control(vmx));

	if (cpu_has_secondary_exec_ctrls())
		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));

	if (cpu_has_tertiary_exec_ctrls())
		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));

	if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
		vmcs_write64(EOI_EXIT_BITMAP0, 0);
		vmcs_write64(EOI_EXIT_BITMAP1, 0);
		vmcs_write64(EOI_EXIT_BITMAP2, 0);
		vmcs_write64(EOI_EXIT_BITMAP3, 0);

		vmcs_write16(GUEST_INTR_STATUS, 0);

		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
	}

	if (vmx_can_use_ipiv(&vmx->vcpu)) {
		vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
		vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
	}

	if (!kvm_pause_in_guest(kvm)) {
		vmcs_write32(PLE_GAP, ple_gap);
		vmx->ple_window = ple_window;
		vmx->ple_window_dirty = true;
	}

	if (kvm_notify_vmexit_enabled(kvm))
		vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);

	vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */

	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
	vmx_set_constant_host_state(vmx);
	vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
	vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */

	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);

	vm_exit_controls_set(vmx, vmx_vmexit_ctrl());

	/* 22.2.1, 20.8.1 */
	vm_entry_controls_set(vmx, vmx_vmentry_ctrl());

	vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);

	set_cr4_guest_host_mask(vmx);

	if (vmx->vpid != 0)
		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);

	if (cpu_has_vmx_xsaves())
		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);

	if (enable_pml) {
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	}

	vmx_write_encls_bitmap(&vmx->vcpu, NULL);

	if (vmx_pt_mode_is_host_guest()) {
		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
		/* Bit[6~0] are forced to 1, writes are ignored. */
		vmx->pt_desc.guest.output_mask = 0x7F;
		vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
	}

	vmcs_write32(GUEST_SYSENTER_CS, 0);
	vmcs_writel(GUEST_SYSENTER_ESP, 0);
	vmcs_writel(GUEST_SYSENTER_EIP, 0);
	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);

	if (cpu_has_vmx_tpr_shadow()) {
		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
		if (cpu_need_tpr_shadow(&vmx->vcpu))
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
				     __pa(vmx->vcpu.arch.apic->regs));
		vmcs_write32(TPR_THRESHOLD, 0);
	}

	vmx_setup_uret_msrs(vmx);
}
static void __vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	init_vmcs(vmx);

	if (nested)
		memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));

	vcpu_setup_sgx_lepubkeyhash(vcpu);

	vmx->nested.posted_intr_nv = -1;
	vmx->nested.vmxon_ptr = INVALID_GPA;
	vmx->nested.current_vmptr = INVALID_GPA;

#ifdef CONFIG_KVM_HYPERV
	vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
#endif

	vcpu->arch.microcode_version = 0x100000000ULL;
	vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;

	/*
	 * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
	 * or POSTED_INTR_WAKEUP_VECTOR.
	 */
	vmx->pi_desc.nv = POSTED_INTR_VECTOR;
	vmx->pi_desc.sn = 1;
}
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!init_event)
		__vmx_vcpu_reset(vcpu);

	vmx->rmode.vm86_active = 0;
	vmx->spec_ctrl = 0;

	vmx->msr_ia32_umwait_control = 0;

	vmx->hv_deadline_tsc = -1;
	kvm_set_cr8(vcpu, 0);

	vmx_segment_cache_clear(vmx);
	kvm_register_mark_available(vcpu, VCPU_EXREG_SEGMENTS);

	seg_setup(VCPU_SREG_CS);
	vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
	vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);

	seg_setup(VCPU_SREG_DS);
	seg_setup(VCPU_SREG_ES);
	seg_setup(VCPU_SREG_FS);
	seg_setup(VCPU_SREG_GS);
	seg_setup(VCPU_SREG_SS);

	vmcs_write16(GUEST_TR_SELECTOR, 0);
	vmcs_writel(GUEST_TR_BASE, 0);
	vmcs_write32(GUEST_TR_LIMIT, 0xffff);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	vmcs_write16(GUEST_LDTR_SELECTOR, 0);
	vmcs_writel(GUEST_LDTR_BASE, 0);
	vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
	vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);

	vmcs_writel(GUEST_GDTR_BASE, 0);
	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);

	vmcs_writel(GUEST_IDTR_BASE, 0);
	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);

	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	if (kvm_mpx_supported())
		vmcs_write64(GUEST_BNDCFGS, 0);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */

	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

	vpid_sync_context(vmx->vpid);

	vmx_update_fb_clear_dis(vcpu, vmx);
}
static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
{
	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
}

static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	if (!enable_vnmi ||
	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
		vmx_enable_irq_window(vcpu);
		return;
	}

	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING);
}
static void vmx_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	uint32_t intr;
	int irq = vcpu->arch.interrupt.nr;

	trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);

	++vcpu->stat.irq_injections;
	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		if (vcpu->arch.interrupt.soft)
			inc_eip = vcpu->arch.event_exit_inst_len;
		kvm_inject_realmode_interrupt(vcpu, irq, inc_eip);
		return;
	}
	intr = irq | INTR_INFO_VALID_MASK;
	if (vcpu->arch.interrupt.soft) {
		intr |= INTR_TYPE_SOFT_INTR;
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
	} else
		intr |= INTR_TYPE_EXT_INTR;
	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);

	vmx_clear_hlt(vcpu);
}
static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!enable_vnmi) {
		/*
		 * Tracking the NMI-blocked state in software is built upon
		 * finding the next open IRQ window. This, in turn, depends on
		 * well-behaving guests: They have to keep IRQs disabled at
		 * least as long as the NMI handler runs. Otherwise we may
		 * cause NMI nesting, maybe breaking the guest. But as this is
		 * highly unlikely, we can live with the residual risk.
		 */
		vmx->loaded_vmcs->soft_vnmi_blocked = 1;
		vmx->loaded_vmcs->vnmi_blocked_time = 0;
	}

	++vcpu->stat.nmi_injections;
	vmx->loaded_vmcs->nmi_known_unmasked = false;

	if (vmx->rmode.vm86_active) {
		kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0);
		return;
	}

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);

	vmx_clear_hlt(vcpu);
}
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool masked;

	if (!enable_vnmi)
		return vmx->loaded_vmcs->soft_vnmi_blocked;
	if (vmx->loaded_vmcs->nmi_known_unmasked)
		return false;
	masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
	vmx->loaded_vmcs->nmi_known_unmasked = !masked;
	return masked;
}

void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!enable_vnmi) {
		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
			vmx->loaded_vmcs->vnmi_blocked_time = 0;
		}
	} else {
		vmx->loaded_vmcs->nmi_known_unmasked = !masked;
		if (masked)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
					GUEST_INTR_STATE_NMI);
	}
}
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
		return false;

	if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
		return true;

	return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
		 GUEST_INTR_STATE_NMI));
}

static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return -EBUSY;

	/* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
		return -EBUSY;

	return !vmx_nmi_blocked(vcpu);
}

bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		return false;

	return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
	       (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
		(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}

static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
	if (to_vmx(vcpu)->nested.nested_run_pending)
		return -EBUSY;

	/*
	 * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
	 * e.g. if the IRQ arrived asynchronously after checking nested events.
	 */
	if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
		return -EBUSY;

	return !vmx_interrupt_blocked(vcpu);
}
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
	void __user *ret;

	if (enable_unrestricted_guest)
		return 0;

	mutex_lock(&kvm->slots_lock);
	ret = __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
				      PAGE_SIZE * 3);
	mutex_unlock(&kvm->slots_lock);

	if (IS_ERR(ret))
		return PTR_ERR(ret);

	to_kvm_vmx(kvm)->tss_addr = addr;

	return init_rmode_tss(kvm, ret);
}

static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
{
	to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
	return 0;
}
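
/*
 * Returns true if the exception is one that can be reflected into the vm86
 * guest for real-mode emulation, i.e. is valid in real mode and isn't being
 * intercepted by a host debugger (#DB/#BP with guest_debug active).
 */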
static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
	switch (vec) {
	case BP_VECTOR:
		/*
		 * Update instruction length as we may reinject the exception
		 * from user space while in guest debugging mode.
		 */
		to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			return false;
		fallthrough;
	case DB_VECTOR:
		return !(vcpu->guest_debug &
			 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
	case DE_VECTOR:
	case OF_VECTOR:
	case BR_VECTOR:
	case UD_VECTOR:
	case DF_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
	case MF_VECTOR:
		return true;
	}
	return false;
}

static int handle_rmode_exception(struct kvm_vcpu *vcpu,
				  int vec, u32 err_code)
{
	/*
	 * Instruction with address size override prefix opcode 0x67
	 * Cause the #SS fault with 0 error code in VM86 mode.
	 */
	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
		if (kvm_emulate_instruction(vcpu, 0)) {
			if (vcpu->arch.halt_request) {
				vcpu->arch.halt_request = 0;
				return kvm_emulate_halt_noskip(vcpu);
			}
			return 1;
		}
		return 0;
	}

	/*
	 * Forward all other exceptions that are valid in real mode.
	 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
	 *        the required debugging infrastructure rework.
	 */
	kvm_queue_exception(vcpu, vec);
	return 1;
}
static int handle_machine_check(struct kvm_vcpu *vcpu)
{
	/* handled by vmx_vcpu_run() */
	return 1;
}

/*
 * If the host has split lock detection disabled, then #AC is
 * unconditionally injected into the guest, which is the pre split lock
 * detection behaviour.
 *
 * If the host has split lock detection enabled then #AC is
 * only injected into the guest when:
 *  - Guest CPL == 3 (user mode)
 *  - Guest has #AC detection enabled in CR0
 *  - Guest EFLAGS has AC bit set
 */
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
{
	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return true;

	return vmx_get_cpl(vcpu) == 3 && kvm_is_cr0_bit_set(vcpu, X86_CR0_AM) &&
	       (kvm_get_rflags(vcpu) & X86_EFLAGS_AC);
}
5174 static int handle_exception_nmi(struct kvm_vcpu
*vcpu
)
5176 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5177 struct kvm_run
*kvm_run
= vcpu
->run
;
5178 u32 intr_info
, ex_no
, error_code
;
5179 unsigned long cr2
, dr6
;
5182 vect_info
= vmx
->idt_vectoring_info
;
5183 intr_info
= vmx_get_intr_info(vcpu
);
5186 * Machine checks are handled by handle_exception_irqoff(), or by
5187 * vmx_vcpu_run() if a #MC occurs on VM-Entry. NMIs are handled by
5188 * vmx_vcpu_enter_exit().
5190 if (is_machine_check(intr_info
) || is_nmi(intr_info
))
5194 * Queue the exception here instead of in handle_nm_fault_irqoff().
5195 * This ensures the nested_vmx check is not skipped so vmexit can
5196 * be reflected to L1 (when it intercepts #NM) before reaching this
5199 if (is_nm_fault(intr_info
)) {
5200 kvm_queue_exception(vcpu
, NM_VECTOR
);
5204 if (is_invalid_opcode(intr_info
))
5205 return handle_ud(vcpu
);
5208 if (intr_info
& INTR_INFO_DELIVER_CODE_MASK
)
5209 error_code
= vmcs_read32(VM_EXIT_INTR_ERROR_CODE
);
5211 if (!vmx
->rmode
.vm86_active
&& is_gp_fault(intr_info
)) {
5212 WARN_ON_ONCE(!enable_vmware_backdoor
);
5215 * VMware backdoor emulation on #GP interception only handles
5216 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero
5217 * error code on #GP.
5220 kvm_queue_exception_e(vcpu
, GP_VECTOR
, error_code
);
5223 return kvm_emulate_instruction(vcpu
, EMULTYPE_VMWARE_GP
);
5227 * The #PF with PFEC.RSVD = 1 indicates the guest is accessing
5228 * MMIO, it is better to report an internal error.
5229 * See the comments in vmx_handle_exit.
5231 if ((vect_info
& VECTORING_INFO_VALID_MASK
) &&
5232 !(is_page_fault(intr_info
) && !(error_code
& PFERR_RSVD_MASK
))) {
5233 vcpu
->run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
5234 vcpu
->run
->internal
.suberror
= KVM_INTERNAL_ERROR_SIMUL_EX
;
5235 vcpu
->run
->internal
.ndata
= 4;
5236 vcpu
->run
->internal
.data
[0] = vect_info
;
5237 vcpu
->run
->internal
.data
[1] = intr_info
;
5238 vcpu
->run
->internal
.data
[2] = error_code
;
5239 vcpu
->run
->internal
.data
[3] = vcpu
->arch
.last_vmentry_cpu
;
5243 if (is_page_fault(intr_info
)) {
5244 cr2
= vmx_get_exit_qual(vcpu
);
5245 if (enable_ept
&& !vcpu
->arch
.apf
.host_apf_flags
) {
5247 * EPT will cause page fault only if we need to
5248 * detect illegal GPAs.
5250 WARN_ON_ONCE(!allow_smaller_maxphyaddr
);
5251 kvm_fixup_and_inject_pf_error(vcpu
, cr2
, error_code
);
5254 return kvm_handle_page_fault(vcpu
, error_code
, cr2
, NULL
, 0);
5257 ex_no
= intr_info
& INTR_INFO_VECTOR_MASK
;
5259 if (vmx
->rmode
.vm86_active
&& rmode_exception(vcpu
, ex_no
))
5260 return handle_rmode_exception(vcpu
, ex_no
, error_code
);
5264 dr6
= vmx_get_exit_qual(vcpu
);
5265 if (!(vcpu
->guest_debug
&
5266 (KVM_GUESTDBG_SINGLESTEP
| KVM_GUESTDBG_USE_HW_BP
))) {
5268 * If the #DB was due to ICEBP, a.k.a. INT1, skip the
5269 * instruction. ICEBP generates a trap-like #DB, but
5270 * despite its interception control being tied to #DB,
5271 * is an instruction intercept, i.e. the VM-Exit occurs
5272 * on the ICEBP itself. Use the inner "skip" helper to
5273 * avoid single-step #DB and MTF updates, as ICEBP is
5274 * higher priority. Note, skipping ICEBP still clears
5275 * STI and MOVSS blocking.
5277 * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
5278 * if single-step is enabled in RFLAGS and STI or MOVSS
5279 * blocking is active, as the CPU doesn't set the bit
5280 * on VM-Exit due to #DB interception. VM-Entry has a
5281 * consistency check that a single-step #DB is pending
5282 * in this scenario as the previous instruction cannot
5283 * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
5284 * don't modify RFLAGS), therefore the one instruction
5285 * delay when activating single-step breakpoints must
5286 * have already expired. Note, the CPU sets/clears BS
5287 * as appropriate for all other VM-Exits types.
5289 if (is_icebp(intr_info
))
5290 WARN_ON(!skip_emulated_instruction(vcpu
));
5291 else if ((vmx_get_rflags(vcpu
) & X86_EFLAGS_TF
) &&
5292 (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO
) &
5293 (GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_MOV_SS
)))
5294 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS
,
5295 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS
) | DR6_BS
);
5297 kvm_queue_exception_p(vcpu
, DB_VECTOR
, dr6
);
5300 kvm_run
->debug
.arch
.dr6
= dr6
| DR6_ACTIVE_LOW
;
5301 kvm_run
->debug
.arch
.dr7
= vmcs_readl(GUEST_DR7
);
5305 * Update instruction length as we may reinject #BP from
5306 * user space while in guest debugging mode. Reading it for
5307 * #DB as well causes no harm, it is not used in that case.
5309 vmx
->vcpu
.arch
.event_exit_inst_len
=
5310 vmcs_read32(VM_EXIT_INSTRUCTION_LEN
);
5311 kvm_run
->exit_reason
= KVM_EXIT_DEBUG
;
5312 kvm_run
->debug
.arch
.pc
= kvm_get_linear_rip(vcpu
);
5313 kvm_run
->debug
.arch
.exception
= ex_no
;
5316 if (vmx_guest_inject_ac(vcpu
)) {
5317 kvm_queue_exception_e(vcpu
, AC_VECTOR
, error_code
);
5322 * Handle split lock. Depending on detection mode this will
5323 * either warn and disable split lock detection for this
5324 * task or force SIGBUS on it.
5326 if (handle_guest_split_lock(kvm_rip_read(vcpu
)))
5330 kvm_run
->exit_reason
= KVM_EXIT_EXCEPTION
;
5331 kvm_run
->ex
.exception
= ex_no
;
5332 kvm_run
->ex
.error_code
= error_code
;
5338 static __always_inline
int handle_external_interrupt(struct kvm_vcpu
*vcpu
)
5340 ++vcpu
->stat
.irq_exits
;
5344 static int handle_triple_fault(struct kvm_vcpu
*vcpu
)
5346 vcpu
->run
->exit_reason
= KVM_EXIT_SHUTDOWN
;
5347 vcpu
->mmio_needed
= 0;
5351 static int handle_io(struct kvm_vcpu
*vcpu
)
5353 unsigned long exit_qualification
;
5354 int size
, in
, string
;
5357 exit_qualification
= vmx_get_exit_qual(vcpu
);
5358 string
= (exit_qualification
& 16) != 0;
5360 ++vcpu
->stat
.io_exits
;
5363 return kvm_emulate_instruction(vcpu
, 0);
5365 port
= exit_qualification
>> 16;
5366 size
= (exit_qualification
& 7) + 1;
5367 in
= (exit_qualification
& 8) != 0;
5369 return kvm_fast_pio(vcpu
, size
, port
, in
);
5373 vmx_patch_hypercall(struct kvm_vcpu
*vcpu
, unsigned char *hypercall
)
5376 * Patch in the VMCALL instruction:
5378 hypercall
[0] = 0x0f;
5379 hypercall
[1] = 0x01;
5380 hypercall
[2] = 0xc1;
5383 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5384 static int handle_set_cr0(struct kvm_vcpu
*vcpu
, unsigned long val
)
5386 if (is_guest_mode(vcpu
)) {
5387 struct vmcs12
*vmcs12
= get_vmcs12(vcpu
);
5388 unsigned long orig_val
= val
;
5391 * We get here when L2 changed cr0 in a way that did not change
5392 * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
5393 * but did change L0 shadowed bits. So we first calculate the
5394 * effective cr0 value that L1 would like to write into the
5395 * hardware. It consists of the L2-owned bits from the new
5396 * value combined with the L1-owned bits from L1's guest_cr0.
5398 val
= (val
& ~vmcs12
->cr0_guest_host_mask
) |
5399 (vmcs12
->guest_cr0
& vmcs12
->cr0_guest_host_mask
);
5401 if (kvm_set_cr0(vcpu
, val
))
5403 vmcs_writel(CR0_READ_SHADOW
, orig_val
);
5406 return kvm_set_cr0(vcpu
, val
);
5410 static int handle_set_cr4(struct kvm_vcpu
*vcpu
, unsigned long val
)
5412 if (is_guest_mode(vcpu
)) {
5413 struct vmcs12
*vmcs12
= get_vmcs12(vcpu
);
5414 unsigned long orig_val
= val
;
5416 /* analogously to handle_set_cr0 */
5417 val
= (val
& ~vmcs12
->cr4_guest_host_mask
) |
5418 (vmcs12
->guest_cr4
& vmcs12
->cr4_guest_host_mask
);
5419 if (kvm_set_cr4(vcpu
, val
))
5421 vmcs_writel(CR4_READ_SHADOW
, orig_val
);
5424 return kvm_set_cr4(vcpu
, val
);
5427 static int handle_desc(struct kvm_vcpu
*vcpu
)
5430 * UMIP emulation relies on intercepting writes to CR4.UMIP, i.e. this
5431 * and other code needs to be updated if UMIP can be guest owned.
5433 BUILD_BUG_ON(KVM_POSSIBLE_CR4_GUEST_BITS
& X86_CR4_UMIP
);
5435 WARN_ON_ONCE(!kvm_is_cr4_bit_set(vcpu
, X86_CR4_UMIP
));
5436 return kvm_emulate_instruction(vcpu
, 0);
5439 static int handle_cr(struct kvm_vcpu
*vcpu
)
5441 unsigned long exit_qualification
, val
;
5447 exit_qualification
= vmx_get_exit_qual(vcpu
);
5448 cr
= exit_qualification
& 15;
5449 reg
= (exit_qualification
>> 8) & 15;
5450 switch ((exit_qualification
>> 4) & 3) {
5451 case 0: /* mov to cr */
5452 val
= kvm_register_read(vcpu
, reg
);
5453 trace_kvm_cr_write(cr
, val
);
5456 err
= handle_set_cr0(vcpu
, val
);
5457 return kvm_complete_insn_gp(vcpu
, err
);
5459 WARN_ON_ONCE(enable_unrestricted_guest
);
5461 err
= kvm_set_cr3(vcpu
, val
);
5462 return kvm_complete_insn_gp(vcpu
, err
);
5464 err
= handle_set_cr4(vcpu
, val
);
5465 return kvm_complete_insn_gp(vcpu
, err
);
5467 u8 cr8_prev
= kvm_get_cr8(vcpu
);
5469 err
= kvm_set_cr8(vcpu
, cr8
);
5470 ret
= kvm_complete_insn_gp(vcpu
, err
);
5471 if (lapic_in_kernel(vcpu
))
5473 if (cr8_prev
<= cr8
)
5476 * TODO: we might be squashing a
5477 * KVM_GUESTDBG_SINGLESTEP-triggered
5478 * KVM_EXIT_DEBUG here.
5480 vcpu
->run
->exit_reason
= KVM_EXIT_SET_TPR
;
5486 KVM_BUG(1, vcpu
->kvm
, "Guest always owns CR0.TS");
5488 case 1: /*mov from cr*/
5491 WARN_ON_ONCE(enable_unrestricted_guest
);
5493 val
= kvm_read_cr3(vcpu
);
5494 kvm_register_write(vcpu
, reg
, val
);
5495 trace_kvm_cr_read(cr
, val
);
5496 return kvm_skip_emulated_instruction(vcpu
);
5498 val
= kvm_get_cr8(vcpu
);
5499 kvm_register_write(vcpu
, reg
, val
);
5500 trace_kvm_cr_read(cr
, val
);
5501 return kvm_skip_emulated_instruction(vcpu
);
5505 val
= (exit_qualification
>> LMSW_SOURCE_DATA_SHIFT
) & 0x0f;
5506 trace_kvm_cr_write(0, (kvm_read_cr0_bits(vcpu
, ~0xful
) | val
));
5507 kvm_lmsw(vcpu
, val
);
5509 return kvm_skip_emulated_instruction(vcpu
);
5513 vcpu
->run
->exit_reason
= 0;
5514 vcpu_unimpl(vcpu
, "unhandled control register: op %d cr %d\n",
5515 (int)(exit_qualification
>> 4) & 3, cr
);
5519 static int handle_dr(struct kvm_vcpu
*vcpu
)
5521 unsigned long exit_qualification
;
5525 exit_qualification
= vmx_get_exit_qual(vcpu
);
5526 dr
= exit_qualification
& DEBUG_REG_ACCESS_NUM
;
5528 /* First, if DR does not exist, trigger UD */
5529 if (!kvm_require_dr(vcpu
, dr
))
5532 if (vmx_get_cpl(vcpu
) > 0)
5535 dr7
= vmcs_readl(GUEST_DR7
);
5538 * As the vm-exit takes precedence over the debug trap, we
5539 * need to emulate the latter, either for the host or the
5540 * guest debugging itself.
5542 if (vcpu
->guest_debug
& KVM_GUESTDBG_USE_HW_BP
) {
5543 vcpu
->run
->debug
.arch
.dr6
= DR6_BD
| DR6_ACTIVE_LOW
;
5544 vcpu
->run
->debug
.arch
.dr7
= dr7
;
5545 vcpu
->run
->debug
.arch
.pc
= kvm_get_linear_rip(vcpu
);
5546 vcpu
->run
->debug
.arch
.exception
= DB_VECTOR
;
5547 vcpu
->run
->exit_reason
= KVM_EXIT_DEBUG
;
5550 kvm_queue_exception_p(vcpu
, DB_VECTOR
, DR6_BD
);
5555 if (vcpu
->guest_debug
== 0) {
5556 exec_controls_clearbit(to_vmx(vcpu
), CPU_BASED_MOV_DR_EXITING
);
5559 * No more DR vmexits; force a reload of the debug registers
5560 * and reenter on this instruction. The next vmexit will
5561 * retrieve the full state of the debug registers.
5563 vcpu
->arch
.switch_db_regs
|= KVM_DEBUGREG_WONT_EXIT
;
5567 reg
= DEBUG_REG_ACCESS_REG(exit_qualification
);
5568 if (exit_qualification
& TYPE_MOV_FROM_DR
) {
5571 kvm_get_dr(vcpu
, dr
, &val
);
5572 kvm_register_write(vcpu
, reg
, val
);
5575 err
= kvm_set_dr(vcpu
, dr
, kvm_register_read(vcpu
, reg
));
5579 return kvm_complete_insn_gp(vcpu
, err
);
5582 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu
*vcpu
)
5584 get_debugreg(vcpu
->arch
.db
[0], 0);
5585 get_debugreg(vcpu
->arch
.db
[1], 1);
5586 get_debugreg(vcpu
->arch
.db
[2], 2);
5587 get_debugreg(vcpu
->arch
.db
[3], 3);
5588 get_debugreg(vcpu
->arch
.dr6
, 6);
5589 vcpu
->arch
.dr7
= vmcs_readl(GUEST_DR7
);
5591 vcpu
->arch
.switch_db_regs
&= ~KVM_DEBUGREG_WONT_EXIT
;
5592 exec_controls_setbit(to_vmx(vcpu
), CPU_BASED_MOV_DR_EXITING
);
5595 * exc_debug expects dr6 to be cleared after it runs, avoid that it sees
5596 * a stale dr6 from the guest.
5598 set_debugreg(DR6_RESERVED
, 6);
5601 static void vmx_set_dr7(struct kvm_vcpu
*vcpu
, unsigned long val
)
5603 vmcs_writel(GUEST_DR7
, val
);
5606 static int handle_tpr_below_threshold(struct kvm_vcpu
*vcpu
)
5608 kvm_apic_update_ppr(vcpu
);
5612 static int handle_interrupt_window(struct kvm_vcpu
*vcpu
)
5614 exec_controls_clearbit(to_vmx(vcpu
), CPU_BASED_INTR_WINDOW_EXITING
);
5616 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
5618 ++vcpu
->stat
.irq_window_exits
;
5622 static int handle_invlpg(struct kvm_vcpu
*vcpu
)
5624 unsigned long exit_qualification
= vmx_get_exit_qual(vcpu
);
5626 kvm_mmu_invlpg(vcpu
, exit_qualification
);
5627 return kvm_skip_emulated_instruction(vcpu
);
5630 static int handle_apic_access(struct kvm_vcpu
*vcpu
)
5632 if (likely(fasteoi
)) {
5633 unsigned long exit_qualification
= vmx_get_exit_qual(vcpu
);
5634 int access_type
, offset
;
5636 access_type
= exit_qualification
& APIC_ACCESS_TYPE
;
5637 offset
= exit_qualification
& APIC_ACCESS_OFFSET
;
5639 * Sane guest uses MOV to write EOI, with written value
5640 * not cared. So make a short-circuit here by avoiding
5641 * heavy instruction emulation.
5643 if ((access_type
== TYPE_LINEAR_APIC_INST_WRITE
) &&
5644 (offset
== APIC_EOI
)) {
5645 kvm_lapic_set_eoi(vcpu
);
5646 return kvm_skip_emulated_instruction(vcpu
);
5649 return kvm_emulate_instruction(vcpu
, 0);
5652 static int handle_apic_eoi_induced(struct kvm_vcpu
*vcpu
)
5654 unsigned long exit_qualification
= vmx_get_exit_qual(vcpu
);
5655 int vector
= exit_qualification
& 0xff;
5657 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
5658 kvm_apic_set_eoi_accelerated(vcpu
, vector
);
5662 static int handle_apic_write(struct kvm_vcpu
*vcpu
)
5664 unsigned long exit_qualification
= vmx_get_exit_qual(vcpu
);
5667 * APIC-write VM-Exit is trap-like, KVM doesn't need to advance RIP and
5668 * hardware has done any necessary aliasing, offset adjustments, etc...
5669 * for the access. I.e. the correct value has already been written to
5670 * the vAPIC page for the correct 16-byte chunk. KVM needs only to
5671 * retrieve the register value and emulate the access.
5673 u32 offset
= exit_qualification
& 0xff0;
5675 kvm_apic_write_nodecode(vcpu
, offset
);
5679 static int handle_task_switch(struct kvm_vcpu
*vcpu
)
5681 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5682 unsigned long exit_qualification
;
5683 bool has_error_code
= false;
5686 int reason
, type
, idt_v
, idt_index
;
5688 idt_v
= (vmx
->idt_vectoring_info
& VECTORING_INFO_VALID_MASK
);
5689 idt_index
= (vmx
->idt_vectoring_info
& VECTORING_INFO_VECTOR_MASK
);
5690 type
= (vmx
->idt_vectoring_info
& VECTORING_INFO_TYPE_MASK
);
5692 exit_qualification
= vmx_get_exit_qual(vcpu
);
5694 reason
= (u32
)exit_qualification
>> 30;
5695 if (reason
== TASK_SWITCH_GATE
&& idt_v
) {
5697 case INTR_TYPE_NMI_INTR
:
5698 vcpu
->arch
.nmi_injected
= false;
5699 vmx_set_nmi_mask(vcpu
, true);
5701 case INTR_TYPE_EXT_INTR
:
5702 case INTR_TYPE_SOFT_INTR
:
5703 kvm_clear_interrupt_queue(vcpu
);
5705 case INTR_TYPE_HARD_EXCEPTION
:
5706 if (vmx
->idt_vectoring_info
&
5707 VECTORING_INFO_DELIVER_CODE_MASK
) {
5708 has_error_code
= true;
5710 vmcs_read32(IDT_VECTORING_ERROR_CODE
);
5713 case INTR_TYPE_SOFT_EXCEPTION
:
5714 kvm_clear_exception_queue(vcpu
);
5720 tss_selector
= exit_qualification
;
5722 if (!idt_v
|| (type
!= INTR_TYPE_HARD_EXCEPTION
&&
5723 type
!= INTR_TYPE_EXT_INTR
&&
5724 type
!= INTR_TYPE_NMI_INTR
))
5725 WARN_ON(!skip_emulated_instruction(vcpu
));
5728 * TODO: What about debug traps on tss switch?
5729 * Are we supposed to inject them and update dr6?
5731 return kvm_task_switch(vcpu
, tss_selector
,
5732 type
== INTR_TYPE_SOFT_INTR
? idt_index
: -1,
5733 reason
, has_error_code
, error_code
);
5736 static int handle_ept_violation(struct kvm_vcpu
*vcpu
)
5738 unsigned long exit_qualification
;
5742 exit_qualification
= vmx_get_exit_qual(vcpu
);
5745 * EPT violation happened while executing iret from NMI,
5746 * "blocked by NMI" bit has to be set before next VM entry.
5747 * There are errata that may cause this bit to not be set:
5750 if (!(to_vmx(vcpu
)->idt_vectoring_info
& VECTORING_INFO_VALID_MASK
) &&
5752 (exit_qualification
& INTR_INFO_UNBLOCK_NMI
))
5753 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO
, GUEST_INTR_STATE_NMI
);
5755 gpa
= vmcs_read64(GUEST_PHYSICAL_ADDRESS
);
5756 trace_kvm_page_fault(vcpu
, gpa
, exit_qualification
);
5758 /* Is it a read fault? */
5759 error_code
= (exit_qualification
& EPT_VIOLATION_ACC_READ
)
5760 ? PFERR_USER_MASK
: 0;
5761 /* Is it a write fault? */
5762 error_code
|= (exit_qualification
& EPT_VIOLATION_ACC_WRITE
)
5763 ? PFERR_WRITE_MASK
: 0;
5764 /* Is it a fetch fault? */
5765 error_code
|= (exit_qualification
& EPT_VIOLATION_ACC_INSTR
)
5766 ? PFERR_FETCH_MASK
: 0;
5767 /* ept page table entry is present? */
5768 error_code
|= (exit_qualification
& EPT_VIOLATION_RWX_MASK
)
5769 ? PFERR_PRESENT_MASK
: 0;
5771 error_code
|= (exit_qualification
& EPT_VIOLATION_GVA_TRANSLATED
) != 0 ?
5772 PFERR_GUEST_FINAL_MASK
: PFERR_GUEST_PAGE_MASK
;
5774 vcpu
->arch
.exit_qualification
= exit_qualification
;
5777 * Check that the GPA doesn't exceed physical memory limits, as that is
5778 * a guest page fault. We have to emulate the instruction here, because
5779 * if the illegal address is that of a paging structure, then
5780 * EPT_VIOLATION_ACC_WRITE bit is set. Alternatively, if supported we
5781 * would also use advanced VM-exit information for EPT violations to
5782 * reconstruct the page fault error code.
5784 if (unlikely(allow_smaller_maxphyaddr
&& !kvm_vcpu_is_legal_gpa(vcpu
, gpa
)))
5785 return kvm_emulate_instruction(vcpu
, 0);
5787 return kvm_mmu_page_fault(vcpu
, gpa
, error_code
, NULL
, 0);
5790 static int handle_ept_misconfig(struct kvm_vcpu
*vcpu
)
5794 if (vmx_check_emulate_instruction(vcpu
, EMULTYPE_PF
, NULL
, 0))
5798 * A nested guest cannot optimize MMIO vmexits, because we have an
5799 * nGPA here instead of the required GPA.
5801 gpa
= vmcs_read64(GUEST_PHYSICAL_ADDRESS
);
5802 if (!is_guest_mode(vcpu
) &&
5803 !kvm_io_bus_write(vcpu
, KVM_FAST_MMIO_BUS
, gpa
, 0, NULL
)) {
5804 trace_kvm_fast_mmio(gpa
);
5805 return kvm_skip_emulated_instruction(vcpu
);
5808 return kvm_mmu_page_fault(vcpu
, gpa
, PFERR_RSVD_MASK
, NULL
, 0);
5811 static int handle_nmi_window(struct kvm_vcpu
*vcpu
)
5813 if (KVM_BUG_ON(!enable_vnmi
, vcpu
->kvm
))
5816 exec_controls_clearbit(to_vmx(vcpu
), CPU_BASED_NMI_WINDOW_EXITING
);
5817 ++vcpu
->stat
.nmi_window_exits
;
5818 kvm_make_request(KVM_REQ_EVENT
, vcpu
);
5823 static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu
*vcpu
)
5825 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5827 return vmx
->emulation_required
&& !vmx
->rmode
.vm86_active
&&
5828 (kvm_is_exception_pending(vcpu
) || vcpu
->arch
.exception
.injected
);
5831 static int handle_invalid_guest_state(struct kvm_vcpu
*vcpu
)
5833 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5834 bool intr_window_requested
;
5835 unsigned count
= 130;
5837 intr_window_requested
= exec_controls_get(vmx
) &
5838 CPU_BASED_INTR_WINDOW_EXITING
;
5840 while (vmx
->emulation_required
&& count
-- != 0) {
5841 if (intr_window_requested
&& !vmx_interrupt_blocked(vcpu
))
5842 return handle_interrupt_window(&vmx
->vcpu
);
5844 if (kvm_test_request(KVM_REQ_EVENT
, vcpu
))
5847 if (!kvm_emulate_instruction(vcpu
, 0))
5850 if (vmx_emulation_required_with_pending_exception(vcpu
)) {
5851 kvm_prepare_emulation_failure_exit(vcpu
);
5855 if (vcpu
->arch
.halt_request
) {
5856 vcpu
->arch
.halt_request
= 0;
5857 return kvm_emulate_halt_noskip(vcpu
);
5861 * Note, return 1 and not 0, vcpu_run() will invoke
5862 * xfer_to_guest_mode() which will create a proper return
5865 if (__xfer_to_guest_mode_work_pending())
5872 static int vmx_vcpu_pre_run(struct kvm_vcpu
*vcpu
)
5874 if (vmx_emulation_required_with_pending_exception(vcpu
)) {
5875 kvm_prepare_emulation_failure_exit(vcpu
);
5882 static void grow_ple_window(struct kvm_vcpu
*vcpu
)
5884 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5885 unsigned int old
= vmx
->ple_window
;
5887 vmx
->ple_window
= __grow_ple_window(old
, ple_window
,
5891 if (vmx
->ple_window
!= old
) {
5892 vmx
->ple_window_dirty
= true;
5893 trace_kvm_ple_window_update(vcpu
->vcpu_id
,
5894 vmx
->ple_window
, old
);
5898 static void shrink_ple_window(struct kvm_vcpu
*vcpu
)
5900 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5901 unsigned int old
= vmx
->ple_window
;
5903 vmx
->ple_window
= __shrink_ple_window(old
, ple_window
,
5907 if (vmx
->ple_window
!= old
) {
5908 vmx
->ple_window_dirty
= true;
5909 trace_kvm_ple_window_update(vcpu
->vcpu_id
,
5910 vmx
->ple_window
, old
);
5915 * Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
5916 * exiting, so only get here on cpu with PAUSE-Loop-Exiting.
5918 static int handle_pause(struct kvm_vcpu
*vcpu
)
5920 if (!kvm_pause_in_guest(vcpu
->kvm
))
5921 grow_ple_window(vcpu
);
5924 * Intel sdm vol3 ch-25.1.3 says: The "PAUSE-loop exiting"
5925 * VM-execution control is ignored if CPL > 0. OTOH, KVM
5926 * never set PAUSE_EXITING and just set PLE if supported,
5927 * so the vcpu must be CPL=0 if it gets a PAUSE exit.
5929 kvm_vcpu_on_spin(vcpu
, true);
5930 return kvm_skip_emulated_instruction(vcpu
);
5933 static int handle_monitor_trap(struct kvm_vcpu
*vcpu
)
5938 static int handle_invpcid(struct kvm_vcpu
*vcpu
)
5940 u32 vmx_instruction_info
;
5949 if (!guest_cpuid_has(vcpu
, X86_FEATURE_INVPCID
)) {
5950 kvm_queue_exception(vcpu
, UD_VECTOR
);
5954 vmx_instruction_info
= vmcs_read32(VMX_INSTRUCTION_INFO
);
5955 gpr_index
= vmx_get_instr_info_reg2(vmx_instruction_info
);
5956 type
= kvm_register_read(vcpu
, gpr_index
);
5958 /* According to the Intel instruction reference, the memory operand
5959 * is read even if it isn't needed (e.g., for type==all)
5961 if (get_vmx_mem_address(vcpu
, vmx_get_exit_qual(vcpu
),
5962 vmx_instruction_info
, false,
5963 sizeof(operand
), &gva
))
5966 return kvm_handle_invpcid(vcpu
, type
, gva
);
5969 static int handle_pml_full(struct kvm_vcpu
*vcpu
)
5971 unsigned long exit_qualification
;
5973 trace_kvm_pml_full(vcpu
->vcpu_id
);
5975 exit_qualification
= vmx_get_exit_qual(vcpu
);
5978 * PML buffer FULL happened while executing iret from NMI,
5979 * "blocked by NMI" bit has to be set before next VM entry.
5981 if (!(to_vmx(vcpu
)->idt_vectoring_info
& VECTORING_INFO_VALID_MASK
) &&
5983 (exit_qualification
& INTR_INFO_UNBLOCK_NMI
))
5984 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO
,
5985 GUEST_INTR_STATE_NMI
);
5988 * PML buffer already flushed at beginning of VMEXIT. Nothing to do
5989 * here.., and there's no userspace involvement needed for PML.
5994 static fastpath_t
handle_fastpath_preemption_timer(struct kvm_vcpu
*vcpu
)
5996 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
5998 if (!vmx
->req_immediate_exit
&&
5999 !unlikely(vmx
->loaded_vmcs
->hv_timer_soft_disabled
)) {
6000 kvm_lapic_expired_hv_timer(vcpu
);
6001 return EXIT_FASTPATH_REENTER_GUEST
;
6004 return EXIT_FASTPATH_NONE
;
6007 static int handle_preemption_timer(struct kvm_vcpu
*vcpu
)
6009 handle_fastpath_preemption_timer(vcpu
);
6014 * When nested=0, all VMX instruction VM Exits filter here. The handlers
6015 * are overwritten by nested_vmx_setup() when nested=1.
6017 static int handle_vmx_instruction(struct kvm_vcpu
*vcpu
)
6019 kvm_queue_exception(vcpu
, UD_VECTOR
);
6023 #ifndef CONFIG_X86_SGX_KVM
6024 static int handle_encls(struct kvm_vcpu
*vcpu
)
6027 * SGX virtualization is disabled. There is no software enable bit for
6028 * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
6029 * the guest from executing ENCLS (when SGX is supported by hardware).
6031 kvm_queue_exception(vcpu
, UD_VECTOR
);
6034 #endif /* CONFIG_X86_SGX_KVM */
6036 static int handle_bus_lock_vmexit(struct kvm_vcpu
*vcpu
)
6039 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
6040 * VM-Exits. Unconditionally set the flag here and leave the handling to
6041 * vmx_handle_exit().
6043 to_vmx(vcpu
)->exit_reason
.bus_lock_detected
= true;
6047 static int handle_notify(struct kvm_vcpu
*vcpu
)
6049 unsigned long exit_qual
= vmx_get_exit_qual(vcpu
);
6050 bool context_invalid
= exit_qual
& NOTIFY_VM_CONTEXT_INVALID
;
6052 ++vcpu
->stat
.notify_window_exits
;
6055 * Notify VM exit happened while executing iret from NMI,
6056 * "blocked by NMI" bit has to be set before next VM entry.
6058 if (enable_vnmi
&& (exit_qual
& INTR_INFO_UNBLOCK_NMI
))
6059 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO
,
6060 GUEST_INTR_STATE_NMI
);
6062 if (vcpu
->kvm
->arch
.notify_vmexit_flags
& KVM_X86_NOTIFY_VMEXIT_USER
||
6064 vcpu
->run
->exit_reason
= KVM_EXIT_NOTIFY
;
6065 vcpu
->run
->notify
.flags
= context_invalid
?
6066 KVM_NOTIFY_CONTEXT_INVALID
: 0;
/*
 * The exit handlers return 1 if the exit was handled fully and guest execution
 * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
 * to be done to userspace and return 0.
 */
static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	[EXIT_REASON_EXCEPTION_NMI]           = handle_exception_nmi,
	[EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
	[EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
	[EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
	[EXIT_REASON_IO_INSTRUCTION]          = handle_io,
	[EXIT_REASON_CR_ACCESS]               = handle_cr,
	[EXIT_REASON_DR_ACCESS]               = handle_dr,
	[EXIT_REASON_CPUID]                   = kvm_emulate_cpuid,
	[EXIT_REASON_MSR_READ]                = kvm_emulate_rdmsr,
	[EXIT_REASON_MSR_WRITE]               = kvm_emulate_wrmsr,
	[EXIT_REASON_INTERRUPT_WINDOW]        = handle_interrupt_window,
	[EXIT_REASON_HLT]                     = kvm_emulate_halt,
	[EXIT_REASON_INVD]                    = kvm_emulate_invd,
	[EXIT_REASON_INVLPG]                  = handle_invlpg,
	[EXIT_REASON_RDPMC]                   = kvm_emulate_rdpmc,
	[EXIT_REASON_VMCALL]                  = kvm_emulate_hypercall,
	[EXIT_REASON_VMCLEAR]                 = handle_vmx_instruction,
	[EXIT_REASON_VMLAUNCH]                = handle_vmx_instruction,
	[EXIT_REASON_VMPTRLD]                 = handle_vmx_instruction,
	[EXIT_REASON_VMPTRST]                 = handle_vmx_instruction,
	[EXIT_REASON_VMREAD]                  = handle_vmx_instruction,
	[EXIT_REASON_VMRESUME]                = handle_vmx_instruction,
	[EXIT_REASON_VMWRITE]                 = handle_vmx_instruction,
	[EXIT_REASON_VMOFF]                   = handle_vmx_instruction,
	[EXIT_REASON_VMON]                    = handle_vmx_instruction,
	[EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
	[EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
	[EXIT_REASON_APIC_WRITE]              = handle_apic_write,
	[EXIT_REASON_EOI_INDUCED]             = handle_apic_eoi_induced,
	[EXIT_REASON_WBINVD]                  = kvm_emulate_wbinvd,
	[EXIT_REASON_XSETBV]                  = kvm_emulate_xsetbv,
	[EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
	[EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
	[EXIT_REASON_GDTR_IDTR]               = handle_desc,
	[EXIT_REASON_LDTR_TR]                 = handle_desc,
	[EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
	[EXIT_REASON_MWAIT_INSTRUCTION]       = kvm_emulate_mwait,
	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
	[EXIT_REASON_MONITOR_INSTRUCTION]     = kvm_emulate_monitor,
	[EXIT_REASON_INVEPT]                  = handle_vmx_instruction,
	[EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
	[EXIT_REASON_RDRAND]                  = kvm_handle_invalid_op,
	[EXIT_REASON_RDSEED]                  = kvm_handle_invalid_op,
	[EXIT_REASON_PML_FULL]                = handle_pml_full,
	[EXIT_REASON_INVPCID]                 = handle_invpcid,
	[EXIT_REASON_VMFUNC]                  = handle_vmx_instruction,
	[EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
	[EXIT_REASON_ENCLS]                   = handle_encls,
	[EXIT_REASON_BUS_LOCK]                = handle_bus_lock_vmexit,
	[EXIT_REASON_NOTIFY]                  = handle_notify,
};

static const int kvm_vmx_max_exit_handlers =
	ARRAY_SIZE(kvm_vmx_exit_handlers);
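/*
 * __vmx_handle_exit() dispatches through this table using the basic exit
 * reason as the index (sanitized with array_index_nospec()).  Out-of-range
 * reasons and NULL entries are reported to userspace as
 * KVM_EXIT_INTERNAL_ERROR with suberror UNEXPECTED_EXIT_REASON.
 */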
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
			      u64 *info1, u64 *info2,
			      u32 *intr_info, u32 *error_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	*reason = vmx->exit_reason.full;
	*info1 = vmx_get_exit_qual(vcpu);
	if (!(vmx->exit_reason.failed_vmentry)) {
		*info2 = vmx->idt_vectoring_info;
		*intr_info = vmx_get_intr_info(vcpu);
		if (is_exception_with_error_code(*intr_info))
			*error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
		else
			*error_code = 0;
	} else {
		*info2 = 0;
		*intr_info = 0;
		*error_code = 0;
	}
}
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
	if (vmx->pml_pg) {
		__free_page(vmx->pml_pg);
		vmx->pml_pg = NULL;
	}
}

static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 *pml_buf;
	u16 pml_idx;

	pml_idx = vmcs_read16(GUEST_PML_INDEX);

	/* Do nothing if PML buffer is empty */
	if (pml_idx == (PML_ENTITY_NUM - 1))
		return;

	/* PML index always points to next available PML buffer entity */
	if (pml_idx >= PML_ENTITY_NUM)
		pml_idx = 0;
	else
		pml_idx++;

	pml_buf = page_address(vmx->pml_pg);
	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
		u64 gpa;

		gpa = pml_buf[pml_idx];
		WARN_ON(gpa & (PAGE_SIZE - 1));
		kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	}

	/* reset PML index */
	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
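/*
 * Note on PML: hardware decrements GUEST_PML_INDEX from PML_ENTITY_NUM - 1
 * toward 0 as it logs dirty GPAs, so the flush above walks from the current
 * index to the end of the buffer and marks each logged page dirty before
 * rearming the index.
 */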
static void vmx_dump_sel(char *name, uint32_t sel)
{
	pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read16(sel),
	       vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
	       vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
	       vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
}

static void vmx_dump_dtsel(char *name, uint32_t limit)
{
	pr_err("%s limit=0x%08x, base=0x%016lx\n",
	       name, vmcs_read32(limit),
	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}

static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
{
	int i;
	struct vmx_msr_entry *e;

	pr_err("MSR %s:\n", name);
	for (i = 0, e = m->val; i < m->nr; ++i, ++e)
		pr_err("  %2d: msr=0x%08x value=0x%016llx\n",
		       i, e->index, e->value);
}
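/*
 * vmx_dump_sel() relies on the fixed layout of the guest segment fields in
 * the VMCS encoding space: the AR bytes, limit and base of a segment sit at
 * constant offsets from its selector encoding, so a single selector encoding
 * is enough to read all four fields.
 */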
6223 void dump_vmcs(struct kvm_vcpu
*vcpu
)
6225 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
6226 u32 vmentry_ctl
, vmexit_ctl
;
6227 u32 cpu_based_exec_ctrl
, pin_based_exec_ctrl
, secondary_exec_control
;
6228 u64 tertiary_exec_control
;
6232 if (!dump_invalid_vmcs
) {
6233 pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
6237 vmentry_ctl
= vmcs_read32(VM_ENTRY_CONTROLS
);
6238 vmexit_ctl
= vmcs_read32(VM_EXIT_CONTROLS
);
6239 cpu_based_exec_ctrl
= vmcs_read32(CPU_BASED_VM_EXEC_CONTROL
);
6240 pin_based_exec_ctrl
= vmcs_read32(PIN_BASED_VM_EXEC_CONTROL
);
6241 cr4
= vmcs_readl(GUEST_CR4
);
6243 if (cpu_has_secondary_exec_ctrls())
6244 secondary_exec_control
= vmcs_read32(SECONDARY_VM_EXEC_CONTROL
);
6246 secondary_exec_control
= 0;
6248 if (cpu_has_tertiary_exec_ctrls())
6249 tertiary_exec_control
= vmcs_read64(TERTIARY_VM_EXEC_CONTROL
);
6251 tertiary_exec_control
= 0;
6253 pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
6254 vmx
->loaded_vmcs
->vmcs
, vcpu
->arch
.last_vmentry_cpu
);
6255 pr_err("*** Guest State ***\n");
6256 pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6257 vmcs_readl(GUEST_CR0
), vmcs_readl(CR0_READ_SHADOW
),
6258 vmcs_readl(CR0_GUEST_HOST_MASK
));
6259 pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
6260 cr4
, vmcs_readl(CR4_READ_SHADOW
), vmcs_readl(CR4_GUEST_HOST_MASK
));
6261 pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3
));
6262 if (cpu_has_vmx_ept()) {
6263 pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
6264 vmcs_read64(GUEST_PDPTR0
), vmcs_read64(GUEST_PDPTR1
));
6265 pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
6266 vmcs_read64(GUEST_PDPTR2
), vmcs_read64(GUEST_PDPTR3
));
6268 pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
6269 vmcs_readl(GUEST_RSP
), vmcs_readl(GUEST_RIP
));
6270 pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
6271 vmcs_readl(GUEST_RFLAGS
), vmcs_readl(GUEST_DR7
));
6272 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6273 vmcs_readl(GUEST_SYSENTER_ESP
),
6274 vmcs_read32(GUEST_SYSENTER_CS
), vmcs_readl(GUEST_SYSENTER_EIP
));
6275 vmx_dump_sel("CS: ", GUEST_CS_SELECTOR
);
6276 vmx_dump_sel("DS: ", GUEST_DS_SELECTOR
);
6277 vmx_dump_sel("SS: ", GUEST_SS_SELECTOR
);
6278 vmx_dump_sel("ES: ", GUEST_ES_SELECTOR
);
6279 vmx_dump_sel("FS: ", GUEST_FS_SELECTOR
);
6280 vmx_dump_sel("GS: ", GUEST_GS_SELECTOR
);
6281 vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT
);
6282 vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR
);
6283 vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT
);
6284 vmx_dump_sel("TR: ", GUEST_TR_SELECTOR
);
6285 efer_slot
= vmx_find_loadstore_msr_slot(&vmx
->msr_autoload
.guest
, MSR_EFER
);
6286 if (vmentry_ctl
& VM_ENTRY_LOAD_IA32_EFER
)
6287 pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER
));
6288 else if (efer_slot
>= 0)
6289 pr_err("EFER= 0x%016llx (autoload)\n",
6290 vmx
->msr_autoload
.guest
.val
[efer_slot
].value
);
6291 else if (vmentry_ctl
& VM_ENTRY_IA32E_MODE
)
6292 pr_err("EFER= 0x%016llx (effective)\n",
6293 vcpu
->arch
.efer
| (EFER_LMA
| EFER_LME
));
6295 pr_err("EFER= 0x%016llx (effective)\n",
6296 vcpu
->arch
.efer
& ~(EFER_LMA
| EFER_LME
));
6297 if (vmentry_ctl
& VM_ENTRY_LOAD_IA32_PAT
)
6298 pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT
));
6299 pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
6300 vmcs_read64(GUEST_IA32_DEBUGCTL
),
6301 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS
));
6302 if (cpu_has_load_perf_global_ctrl() &&
6303 vmentry_ctl
& VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
)
6304 pr_err("PerfGlobCtl = 0x%016llx\n",
6305 vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL
));
6306 if (vmentry_ctl
& VM_ENTRY_LOAD_BNDCFGS
)
6307 pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS
));
6308 pr_err("Interruptibility = %08x ActivityState = %08x\n",
6309 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO
),
6310 vmcs_read32(GUEST_ACTIVITY_STATE
));
6311 if (secondary_exec_control
& SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY
)
6312 pr_err("InterruptStatus = %04x\n",
6313 vmcs_read16(GUEST_INTR_STATUS
));
6314 if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT
) > 0)
6315 vmx_dump_msrs("guest autoload", &vmx
->msr_autoload
.guest
);
6316 if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT
) > 0)
6317 vmx_dump_msrs("guest autostore", &vmx
->msr_autostore
.guest
);
6319 pr_err("*** Host State ***\n");
6320 pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
6321 vmcs_readl(HOST_RIP
), vmcs_readl(HOST_RSP
));
6322 pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
6323 vmcs_read16(HOST_CS_SELECTOR
), vmcs_read16(HOST_SS_SELECTOR
),
6324 vmcs_read16(HOST_DS_SELECTOR
), vmcs_read16(HOST_ES_SELECTOR
),
6325 vmcs_read16(HOST_FS_SELECTOR
), vmcs_read16(HOST_GS_SELECTOR
),
6326 vmcs_read16(HOST_TR_SELECTOR
));
6327 pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
6328 vmcs_readl(HOST_FS_BASE
), vmcs_readl(HOST_GS_BASE
),
6329 vmcs_readl(HOST_TR_BASE
));
6330 pr_err("GDTBase=%016lx IDTBase=%016lx\n",
6331 vmcs_readl(HOST_GDTR_BASE
), vmcs_readl(HOST_IDTR_BASE
));
6332 pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
6333 vmcs_readl(HOST_CR0
), vmcs_readl(HOST_CR3
),
6334 vmcs_readl(HOST_CR4
));
6335 pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
6336 vmcs_readl(HOST_IA32_SYSENTER_ESP
),
6337 vmcs_read32(HOST_IA32_SYSENTER_CS
),
6338 vmcs_readl(HOST_IA32_SYSENTER_EIP
));
6339 if (vmexit_ctl
& VM_EXIT_LOAD_IA32_EFER
)
6340 pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER
));
6341 if (vmexit_ctl
& VM_EXIT_LOAD_IA32_PAT
)
6342 pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT
));
6343 if (cpu_has_load_perf_global_ctrl() &&
6344 vmexit_ctl
& VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL
)
6345 pr_err("PerfGlobCtl = 0x%016llx\n",
6346 vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL
));
6347 if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT
) > 0)
6348 vmx_dump_msrs("host autoload", &vmx
->msr_autoload
.host
);
6350 pr_err("*** Control State ***\n");
6351 pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
6352 cpu_based_exec_ctrl
, secondary_exec_control
, tertiary_exec_control
);
6353 pr_err("PinBased=0x%08x EntryControls=%08x ExitControls=%08x\n",
6354 pin_based_exec_ctrl
, vmentry_ctl
, vmexit_ctl
);
6355 pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
6356 vmcs_read32(EXCEPTION_BITMAP
),
6357 vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK
),
6358 vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH
));
6359 pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
6360 vmcs_read32(VM_ENTRY_INTR_INFO_FIELD
),
6361 vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE
),
6362 vmcs_read32(VM_ENTRY_INSTRUCTION_LEN
));
6363 pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
6364 vmcs_read32(VM_EXIT_INTR_INFO
),
6365 vmcs_read32(VM_EXIT_INTR_ERROR_CODE
),
6366 vmcs_read32(VM_EXIT_INSTRUCTION_LEN
));
6367 pr_err(" reason=%08x qualification=%016lx\n",
6368 vmcs_read32(VM_EXIT_REASON
), vmcs_readl(EXIT_QUALIFICATION
));
6369 pr_err("IDTVectoring: info=%08x errcode=%08x\n",
6370 vmcs_read32(IDT_VECTORING_INFO_FIELD
),
6371 vmcs_read32(IDT_VECTORING_ERROR_CODE
));
6372 pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET
));
6373 if (secondary_exec_control
& SECONDARY_EXEC_TSC_SCALING
)
6374 pr_err("TSC Multiplier = 0x%016llx\n",
6375 vmcs_read64(TSC_MULTIPLIER
));
6376 if (cpu_based_exec_ctrl
& CPU_BASED_TPR_SHADOW
) {
6377 if (secondary_exec_control
& SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY
) {
6378 u16 status
= vmcs_read16(GUEST_INTR_STATUS
);
6379 pr_err("SVI|RVI = %02x|%02x ", status
>> 8, status
& 0xff);
6381 pr_cont("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD
));
6382 if (secondary_exec_control
& SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
)
6383 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR
));
6384 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR
));
6386 if (pin_based_exec_ctrl
& PIN_BASED_POSTED_INTR
)
6387 pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV
));
6388 if ((secondary_exec_control
& SECONDARY_EXEC_ENABLE_EPT
))
6389 pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER
));
6390 if (secondary_exec_control
& SECONDARY_EXEC_PAUSE_LOOP_EXITING
)
6391 pr_err("PLE Gap=%08x Window=%08x\n",
6392 vmcs_read32(PLE_GAP
), vmcs_read32(PLE_WINDOW
));
6393 if (secondary_exec_control
& SECONDARY_EXEC_ENABLE_VPID
)
6394 pr_err("Virtual processor ID = 0x%04x\n",
6395 vmcs_read16(VIRTUAL_PROCESSOR_ID
));
6399 * The guest has exited. See if we can fix it or if we need userspace
6402 static int __vmx_handle_exit(struct kvm_vcpu
*vcpu
, fastpath_t exit_fastpath
)
6404 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
6405 union vmx_exit_reason exit_reason
= vmx
->exit_reason
;
6406 u32 vectoring_info
= vmx
->idt_vectoring_info
;
6407 u16 exit_handler_index
;
6410 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
6411 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
6412 * querying dirty_bitmap, we only need to kick all vcpus out of guest
6413 * mode as if vcpus is in root mode, the PML buffer must has been
6414 * flushed already. Note, PML is never enabled in hardware while
6417 if (enable_pml
&& !is_guest_mode(vcpu
))
6418 vmx_flush_pml_buffer(vcpu
);
6421 * KVM should never reach this point with a pending nested VM-Enter.
6422 * More specifically, short-circuiting VM-Entry to emulate L2 due to
6423 * invalid guest state should never happen as that means KVM knowingly
6424 * allowed a nested VM-Enter with an invalid vmcs12. More below.
6426 if (KVM_BUG_ON(vmx
->nested
.nested_run_pending
, vcpu
->kvm
))
6429 if (is_guest_mode(vcpu
)) {
6431 * PML is never enabled when running L2, bail immediately if a
6432 * PML full exit occurs as something is horribly wrong.
6434 if (exit_reason
.basic
== EXIT_REASON_PML_FULL
)
6435 goto unexpected_vmexit
;
6438 * The host physical addresses of some pages of guest memory
6439 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
6440 * Page). The CPU may write to these pages via their host
6441 * physical address while L2 is running, bypassing any
6442 * address-translation-based dirty tracking (e.g. EPT write
6445 * Mark them dirty on every exit from L2 to prevent them from
6446 * getting out of sync with dirty tracking.
6448 nested_mark_vmcs12_pages_dirty(vcpu
);
6451 * Synthesize a triple fault if L2 state is invalid. In normal
6452 * operation, nested VM-Enter rejects any attempt to enter L2
6453 * with invalid state. However, those checks are skipped if
6454 * state is being stuffed via RSM or KVM_SET_NESTED_STATE. If
6455 * L2 state is invalid, it means either L1 modified SMRAM state
6456 * or userspace provided bad state. Synthesize TRIPLE_FAULT as
6457 * doing so is architecturally allowed in the RSM case, and is
6458 * the least awful solution for the userspace case without
6459 * risking false positives.
6461 if (vmx
->emulation_required
) {
6462 nested_vmx_vmexit(vcpu
, EXIT_REASON_TRIPLE_FAULT
, 0, 0);
6466 if (nested_vmx_reflect_vmexit(vcpu
))
6470 /* If guest state is invalid, start emulating. L2 is handled above. */
6471 if (vmx
->emulation_required
)
6472 return handle_invalid_guest_state(vcpu
);
6474 if (exit_reason
.failed_vmentry
) {
6476 vcpu
->run
->exit_reason
= KVM_EXIT_FAIL_ENTRY
;
6477 vcpu
->run
->fail_entry
.hardware_entry_failure_reason
6479 vcpu
->run
->fail_entry
.cpu
= vcpu
->arch
.last_vmentry_cpu
;
6483 if (unlikely(vmx
->fail
)) {
6485 vcpu
->run
->exit_reason
= KVM_EXIT_FAIL_ENTRY
;
6486 vcpu
->run
->fail_entry
.hardware_entry_failure_reason
6487 = vmcs_read32(VM_INSTRUCTION_ERROR
);
6488 vcpu
->run
->fail_entry
.cpu
= vcpu
->arch
.last_vmentry_cpu
;
6494 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it caused by
6495 * delivery event since it indicates guest is accessing MMIO.
6496 * The vm-exit can be triggered again after return to guest that
6497 * will cause infinite loop.
6499 if ((vectoring_info
& VECTORING_INFO_VALID_MASK
) &&
6500 (exit_reason
.basic
!= EXIT_REASON_EXCEPTION_NMI
&&
6501 exit_reason
.basic
!= EXIT_REASON_EPT_VIOLATION
&&
6502 exit_reason
.basic
!= EXIT_REASON_PML_FULL
&&
6503 exit_reason
.basic
!= EXIT_REASON_APIC_ACCESS
&&
6504 exit_reason
.basic
!= EXIT_REASON_TASK_SWITCH
&&
6505 exit_reason
.basic
!= EXIT_REASON_NOTIFY
)) {
6508 vcpu
->run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
6509 vcpu
->run
->internal
.suberror
= KVM_INTERNAL_ERROR_DELIVERY_EV
;
6510 vcpu
->run
->internal
.data
[0] = vectoring_info
;
6511 vcpu
->run
->internal
.data
[1] = exit_reason
.full
;
6512 vcpu
->run
->internal
.data
[2] = vcpu
->arch
.exit_qualification
;
6513 if (exit_reason
.basic
== EXIT_REASON_EPT_MISCONFIG
) {
6514 vcpu
->run
->internal
.data
[ndata
++] =
6515 vmcs_read64(GUEST_PHYSICAL_ADDRESS
);
6517 vcpu
->run
->internal
.data
[ndata
++] = vcpu
->arch
.last_vmentry_cpu
;
6518 vcpu
->run
->internal
.ndata
= ndata
;
6522 if (unlikely(!enable_vnmi
&&
6523 vmx
->loaded_vmcs
->soft_vnmi_blocked
)) {
6524 if (!vmx_interrupt_blocked(vcpu
)) {
6525 vmx
->loaded_vmcs
->soft_vnmi_blocked
= 0;
6526 } else if (vmx
->loaded_vmcs
->vnmi_blocked_time
> 1000000000LL &&
6527 vcpu
->arch
.nmi_pending
) {
6529 * This CPU don't support us in finding the end of an
6530 * NMI-blocked window if the guest runs with IRQs
6531 * disabled. So we pull the trigger after 1 s of
6532 * futile waiting, but inform the user about this.
6534 printk(KERN_WARNING
"%s: Breaking out of NMI-blocked "
6535 "state on VCPU %d after 1 s timeout\n",
6536 __func__
, vcpu
->vcpu_id
);
6537 vmx
->loaded_vmcs
->soft_vnmi_blocked
= 0;
6541 if (exit_fastpath
!= EXIT_FASTPATH_NONE
)
6544 if (exit_reason
.basic
>= kvm_vmx_max_exit_handlers
)
6545 goto unexpected_vmexit
;
6546 #ifdef CONFIG_MITIGATION_RETPOLINE
6547 if (exit_reason
.basic
== EXIT_REASON_MSR_WRITE
)
6548 return kvm_emulate_wrmsr(vcpu
);
6549 else if (exit_reason
.basic
== EXIT_REASON_PREEMPTION_TIMER
)
6550 return handle_preemption_timer(vcpu
);
6551 else if (exit_reason
.basic
== EXIT_REASON_INTERRUPT_WINDOW
)
6552 return handle_interrupt_window(vcpu
);
6553 else if (exit_reason
.basic
== EXIT_REASON_EXTERNAL_INTERRUPT
)
6554 return handle_external_interrupt(vcpu
);
6555 else if (exit_reason
.basic
== EXIT_REASON_HLT
)
6556 return kvm_emulate_halt(vcpu
);
6557 else if (exit_reason
.basic
== EXIT_REASON_EPT_MISCONFIG
)
6558 return handle_ept_misconfig(vcpu
);
6561 exit_handler_index
= array_index_nospec((u16
)exit_reason
.basic
,
6562 kvm_vmx_max_exit_handlers
);
6563 if (!kvm_vmx_exit_handlers
[exit_handler_index
])
6564 goto unexpected_vmexit
;
6566 return kvm_vmx_exit_handlers
[exit_handler_index
](vcpu
);
6569 vcpu_unimpl(vcpu
, "vmx: unexpected exit reason 0x%x\n",
6572 vcpu
->run
->exit_reason
= KVM_EXIT_INTERNAL_ERROR
;
6573 vcpu
->run
->internal
.suberror
=
6574 KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON
;
6575 vcpu
->run
->internal
.ndata
= 2;
6576 vcpu
->run
->internal
.data
[0] = exit_reason
.full
;
6577 vcpu
->run
->internal
.data
[1] = vcpu
->arch
.last_vmentry_cpu
;
static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	int ret = __vmx_handle_exit(vcpu, exit_fastpath);

	/*
	 * Exit to user space when bus lock detected to inform that there is
	 * a bus lock in guest.
	 */
	if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
		if (ret > 0)
			vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;

		vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
		return 0;
	}
	return ret;
}
/*
 * Software based L1D cache flush which is used when microcode providing
 * the cache control MSR is not loaded.
 *
 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
 * flush it is required to read in 64 KiB because the replacement algorithm
 * is not exactly LRU. This could be sized at runtime via topology
 * information but as all relevant affected CPUs have 32KiB L1D cache size
 * there is no point in doing so.
 */
static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
{
	int size = PAGE_SIZE << L1D_CACHE_ORDER;

	/*
	 * This code is only executed when the flush mode is 'cond' or
	 * 'always'
	 */
	if (static_branch_likely(&vmx_l1d_flush_cond)) {
		bool flush_l1d;

		/*
		 * Clear the per-vcpu flush bit, it gets set again
		 * either from vcpu_run() or from one of the unsafe
		 * VMEXIT handlers.
		 */
		flush_l1d = vcpu->arch.l1tf_flush_l1d;
		vcpu->arch.l1tf_flush_l1d = false;

		/*
		 * Clear the per-cpu flush bit, it gets set again from
		 * the interrupt handlers.
		 */
		flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
		kvm_clear_cpu_l1tf_flush_l1d();

		if (!flush_l1d)
			return;
	}

	vcpu->stat.l1d_flush++;

	if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		native_wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
		return;
	}

	asm volatile(
		/* First ensure the pages are in the TLB */
		"xorl	%%eax, %%eax\n"
		".Lpopulate_tlb:\n\t"
		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
		"addl	$4096, %%eax\n\t"
		"cmpl	%%eax, %[size]\n\t"
		"jne	.Lpopulate_tlb\n\t"
		"xorl	%%eax, %%eax\n\t"
		"cpuid\n\t"
		/* Now fill the cache */
		"xorl	%%eax, %%eax\n"
		".Lfill_cache:\n"
		"movzbl	(%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
		"addl	$64, %%eax\n\t"
		"cmpl	%%eax, %[size]\n\t"
		"jne	.Lfill_cache\n\t"
		"lfence\n"
		:: [flush_pages] "r" (vmx_l1d_flush_pages),
		    [size] "r" (size)
		: "eax", "ebx", "ecx", "edx");
}
static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	int tpr_threshold;

	if (is_guest_mode(vcpu) &&
	    nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return;

	tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr;
	if (is_guest_mode(vcpu))
		to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold;
	else
		vmcs_write32(TPR_THRESHOLD, tpr_threshold);
}
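/*
 * TPR_THRESHOLD works together with the TPR shadow: the CPU generates a
 * TPR-below-threshold exit when the guest lowers its TPR below the
 * programmed value, so setting the threshold to the highest pending IRR
 * priority lets KVM re-evaluate interrupt delivery as soon as it becomes
 * possible.
 */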
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 sec_exec_control;

	if (!lapic_in_kernel(vcpu))
		return;

	if (!flexpriority_enabled &&
	    !cpu_has_vmx_virtualize_x2apic_mode())
		return;

	/* Postpone execution until vmcs01 is the current VMCS. */
	if (is_guest_mode(vcpu)) {
		vmx->nested.change_vmcs01_virtual_apic_mode = true;
		return;
	}

	sec_exec_control = secondary_exec_controls_get(vmx);
	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);

	switch (kvm_get_apic_mode(vcpu)) {
	case LAPIC_MODE_INVALID:
		WARN_ONCE(true, "Invalid local APIC state");
		break;
	case LAPIC_MODE_DISABLED:
		break;
	case LAPIC_MODE_XAPIC:
		if (flexpriority_enabled) {
			sec_exec_control |=
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
			kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

			/*
			 * Flush the TLB, reloading the APIC access page will
			 * only do so if its physical address has changed, but
			 * the guest may have inserted a non-APIC mapping into
			 * the TLB while the APIC access page was disabled.
			 */
			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}
		break;
	case LAPIC_MODE_X2APIC:
		if (cpu_has_vmx_virtualize_x2apic_mode())
			sec_exec_control |=
				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
		break;
	}
	secondary_exec_controls_set(vmx, sec_exec_control);

	vmx_update_msr_bitmap_x2apic(vcpu);
}
6739 static void vmx_set_apic_access_page_addr(struct kvm_vcpu
*vcpu
)
6741 const gfn_t gfn
= APIC_DEFAULT_PHYS_BASE
>> PAGE_SHIFT
;
6742 struct kvm
*kvm
= vcpu
->kvm
;
6743 struct kvm_memslots
*slots
= kvm_memslots(kvm
);
6744 struct kvm_memory_slot
*slot
;
6745 unsigned long mmu_seq
;
6748 /* Defer reload until vmcs01 is the current VMCS. */
6749 if (is_guest_mode(vcpu
)) {
6750 to_vmx(vcpu
)->nested
.reload_vmcs01_apic_access_page
= true;
6754 if (!(secondary_exec_controls_get(to_vmx(vcpu
)) &
6755 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
))
6759 * Explicitly grab the memslot using KVM's internal slot ID to ensure
6760 * KVM doesn't unintentionally grab a userspace memslot. It _should_
6761 * be impossible for userspace to create a memslot for the APIC when
6762 * APICv is enabled, but paranoia won't hurt in this case.
6764 slot
= id_to_memslot(slots
, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
);
6765 if (!slot
|| slot
->flags
& KVM_MEMSLOT_INVALID
)
6769 * Ensure that the mmu_notifier sequence count is read before KVM
6770 * retrieves the pfn from the primary MMU. Note, the memslot is
6771 * protected by SRCU, not the mmu_notifier. Pairs with the smp_wmb()
6772 * in kvm_mmu_invalidate_end().
6774 mmu_seq
= kvm
->mmu_invalidate_seq
;
6778 * No need to retry if the memslot does not exist or is invalid. KVM
6779 * controls the APIC-access page memslot, and only deletes the memslot
6780 * if APICv is permanently inhibited, i.e. the memslot won't reappear.
6782 pfn
= gfn_to_pfn_memslot(slot
, gfn
);
6783 if (is_error_noslot_pfn(pfn
))
6786 read_lock(&vcpu
->kvm
->mmu_lock
);
6787 if (mmu_invalidate_retry_gfn(kvm
, mmu_seq
, gfn
)) {
6788 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD
, vcpu
);
6789 read_unlock(&vcpu
->kvm
->mmu_lock
);
6793 vmcs_write64(APIC_ACCESS_ADDR
, pfn_to_hpa(pfn
));
6794 read_unlock(&vcpu
->kvm
->mmu_lock
);
6797 * No need for a manual TLB flush at this point, KVM has already done a
6798 * flush if there were SPTEs pointing at the previous page.
6802 * Do not pin apic access page in memory, the MMU notifier
6803 * will call us again if it is migrated or swapped out.
6805 kvm_release_pfn_clean(pfn
);
static void vmx_hwapic_isr_update(int max_isr)
{
	u16 status;
	u8 old;

	if (max_isr == -1)
		max_isr = 0;

	status = vmcs_read16(GUEST_INTR_STATUS);
	old = status >> 8;
	if (max_isr != old) {
		status &= 0xff;
		status |= max_isr << 8;
		vmcs_write16(GUEST_INTR_STATUS, status);
	}
}

static void vmx_set_rvi(int vector)
{
	u16 status;
	u8 old;

	if (vector == -1)
		vector = 0;

	status = vmcs_read16(GUEST_INTR_STATUS);
	old = (u8)status & 0xff;
	if ((u8)vector != old) {
		status &= ~0xff;
		status |= (u8)vector;
		vmcs_write16(GUEST_INTR_STATUS, status);
	}
}

static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
	/*
	 * When running L2, updating RVI is only relevant when
	 * vmcs12 virtual-interrupt-delivery enabled.
	 * However, it can be enabled only when L1 also
	 * intercepts external-interrupts and in that case
	 * we should not update vmcs02 RVI but instead intercept
	 * interrupt. Therefore, do nothing when running L2.
	 */
	if (!is_guest_mode(vcpu))
		vmx_set_rvi(max_irr);
}
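/*
 * GUEST_INTR_STATUS packs RVI in the low byte and SVI in the high byte,
 * which is why vmx_set_rvi() and vmx_hwapic_isr_update() each rewrite only
 * their half of the 16-bit field.
 */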
static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	bool got_posted_interrupt;

	if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
		return -EIO;

	if (pi_test_on(&vmx->pi_desc)) {
		pi_clear_on(&vmx->pi_desc);
		/*
		 * IOMMU can write to PID.ON, so the barrier matters even on UP.
		 * But on x86 this is just a compiler barrier anyway.
		 */
		smp_mb__after_atomic();
		got_posted_interrupt =
			kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
	} else {
		max_irr = kvm_lapic_find_highest_irr(vcpu);
		got_posted_interrupt = false;
	}

	/*
	 * Newly recognized interrupts are injected via either virtual interrupt
	 * delivery (RVI) or KVM_REQ_EVENT.  Virtual interrupt delivery is
	 * disabled in two cases:
	 *
	 * 1) If L2 is running and the vCPU has a new pending interrupt.  If L1
	 * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
	 * VM-Exit to L1.  If L1 doesn't want to exit, the interrupt is injected
	 * into L2, but KVM doesn't use virtual interrupt delivery to inject
	 * interrupts into L2, and so KVM_REQ_EVENT is again needed.
	 *
	 * 2) If APICv is disabled for this vCPU, assigned devices may still
	 * attempt to post interrupts.  The posted interrupt vector will cause
	 * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
	 */
	if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
		vmx_set_rvi(max_irr);
	else if (got_posted_interrupt)
		kvm_make_request(KVM_REQ_EVENT, vcpu);

	return max_irr;
}
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	if (!kvm_vcpu_apicv_active(vcpu))
		return;

	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}

static void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	pi_clear_on(&vmx->pi_desc);
	memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
}
void vmx_do_interrupt_irqoff(unsigned long entry);
void vmx_do_nmi_irqoff(void);

static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
{
	/*
	 * Save xfd_err to guest_fpu before interrupt is enabled, so the
	 * MSR value is not clobbered by the host activity before the guest
	 * has chance to consume it.
	 *
	 * Do not blindly read xfd_err here, since this exception might
	 * be caused by L1 interception on a platform which doesn't
	 * support xfd at all.
	 *
	 * Do it conditionally upon guest_fpu::xfd. xfd_err matters
	 * only when xfd contains a non-zero value.
	 *
	 * Queuing exception is done in vmx_handle_exit. See comment there.
	 */
	if (vcpu->arch.guest_fpu.fpstate->xfd)
		rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
}

static void handle_exception_irqoff(struct vcpu_vmx *vmx)
{
	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);

	/* if exit due to PF check for async PF */
	if (is_page_fault(intr_info))
		vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
	/* if exit due to NM, handle before interrupts are enabled */
	else if (is_nm_fault(intr_info))
		handle_nm_fault_irqoff(&vmx->vcpu);
	/* Handle machine checks before interrupts are enabled */
	else if (is_machine_check(intr_info))
		kvm_machine_check();
}
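/*
 * These *_irqoff() helpers run from vmx_handle_exit_irqoff(), i.e. with
 * interrupts still disabled immediately after VM-Exit, so that volatile
 * state such as xfd_err and the async-#PF flags is captured before host
 * interrupt handlers can clobber it.
 */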
static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
{
	u32 intr_info = vmx_get_intr_info(vcpu);
	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
	gate_desc *desc = (gate_desc *)host_idt_base + vector;

	if (KVM_BUG(!is_external_intr(intr_info), vcpu->kvm,
	    "unexpected VM-Exit interrupt info: 0x%x", intr_info))
		return;

	kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
	vmx_do_interrupt_irqoff(gate_offset(desc));
	kvm_after_interrupt(vcpu);

	vcpu->arch.at_instruction_boundary = true;
}

static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->emulation_required)
		return;

	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
		handle_external_interrupt_irqoff(vcpu);
	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
		handle_exception_irqoff(vmx);
}
/*
 * The kvm parameter can be NULL (module initialization, or invocation before
 * VM creation). Be sure to check the kvm parameter before using it.
 */
static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
{
	switch (index) {
	case MSR_IA32_SMBASE:
		if (!IS_ENABLED(CONFIG_KVM_SMM))
			return false;
		/*
		 * We cannot do SMM unless we can run the guest in big
		 * real mode.
		 */
		return enable_unrestricted_guest || emulate_invalid_guest_state;
	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
		return nested;
	case MSR_AMD64_VIRT_SPEC_CTRL:
	case MSR_AMD64_TSC_RATIO:
		/* This is AMD only.  */
		return false;
	default:
		return true;
	}
}
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
	u32 exit_intr_info;
	bool unblock_nmi;
	u8 vector;
	bool idtv_info_valid;

	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	if (enable_vnmi) {
		if (vmx->loaded_vmcs->nmi_known_unmasked)
			return;

		exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Re-set bit "block by NMI" before VM entry if vmexit caused by
		 * a guest IRET fault.
		 * SDM 3: 23.2.2 (September 2008)
		 * Bit 12 is undefined in any of the following cases:
		 *  If the VM exit sets the valid bit in the IDT-vectoring
		 *   information field.
		 *  If the VM exit is due to a double fault.
		 */
		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
		    vector != DF_VECTOR && !idtv_info_valid)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
		else
			vmx->loaded_vmcs->nmi_known_unmasked =
				!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
				  & GUEST_INTR_STATE_NMI);
	} else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
		vmx->loaded_vmcs->vnmi_blocked_time +=
			ktime_to_ns(ktime_sub(ktime_get(),
					      vmx->loaded_vmcs->entry_time));
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
				      u32 idt_vectoring_info,
				      int instr_len_field,
				      int error_code_field)
{
	u8 vector;
	int type;
	bool idtv_info_valid;

	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	vcpu->arch.nmi_injected = false;
	kvm_clear_exception_queue(vcpu);
	kvm_clear_interrupt_queue(vcpu);

	if (!idtv_info_valid)
		return;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;

	switch (type) {
	case INTR_TYPE_NMI_INTR:
		vcpu->arch.nmi_injected = true;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Clear bit "block by NMI" before VM entry if a NMI
		 * delivery faulted.
		 */
		vmx_set_nmi_mask(vcpu, false);
		break;
	case INTR_TYPE_SOFT_EXCEPTION:
		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		fallthrough;
	case INTR_TYPE_HARD_EXCEPTION:
		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
			u32 err = vmcs_read32(error_code_field);
			kvm_requeue_exception_e(vcpu, vector, err);
		} else
			kvm_requeue_exception(vcpu, vector);
		break;
	case INTR_TYPE_SOFT_INTR:
		vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
		fallthrough;
	case INTR_TYPE_EXT_INTR:
		kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
		break;
	default:
		break;
	}
}

static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
	__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
				  VM_EXIT_INSTRUCTION_LEN,
				  IDT_VECTORING_ERROR_CODE);
}

static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
	__vmx_complete_interrupts(vcpu,
				  vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
				  VM_ENTRY_INSTRUCTION_LEN,
				  VM_ENTRY_EXCEPTION_ERROR_CODE);

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
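/*
 * vmx_complete_interrupts() re-queues an event that was being delivered when
 * the VM-Exit occurred (IDT-vectoring info), while vmx_cancel_injection()
 * reads back the VM_ENTRY_* fields KVM programmed but hardware never
 * consumed; both funnel into the same re-queue logic above.
 */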
static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
{
	int i, nr_msrs;
	struct perf_guest_switch_msr *msrs;
	struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);

	pmu->host_cross_mapped_mask = 0;
	if (pmu->pebs_enable & pmu->global_ctrl)
		intel_pmu_cross_mapped_check(pmu);

	/* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
	msrs = perf_guest_get_msrs(&nr_msrs, (void *)pmu);
	if (!msrs)
		return;

	for (i = 0; i < nr_msrs; i++)
		if (msrs[i].host == msrs[i].guest)
			clear_atomic_switch_msr(vmx, msrs[i].msr);
		else
			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
					      msrs[i].host, false);
}
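/*
 * Keeping the VM-Entry/VM-Exit MSR switch lists minimal matters for
 * latency: perf MSRs whose guest and host values happen to match are
 * dropped from the atomic-switch lists instead of being reloaded on every
 * transition.
 */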
static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 tscl;
	u32 delta_tsc;

	if (vmx->req_immediate_exit) {
		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, 0);
		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
	} else if (vmx->hv_deadline_tsc != -1) {
		tscl = rdtsc();
		if (vmx->hv_deadline_tsc > tscl)
			/* set_hv_timer ensures the delta fits in 32-bits */
			delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
				cpu_preemption_timer_multi);
		else
			delta_tsc = 0;

		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
		vmx->loaded_vmcs->hv_timer_soft_disabled = false;
	} else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
		vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1);
		vmx->loaded_vmcs->hv_timer_soft_disabled = true;
	}
}
void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
{
	if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
		vmx->loaded_vmcs->host_state.rsp = host_rsp;
		vmcs_writel(HOST_RSP, host_rsp);
	}
}

void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
					unsigned int flags)
{
	u64 hostval = this_cpu_read(x86_spec_ctrl_current);

	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
		return;

	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);

	/*
	 * If the guest/host SPEC_CTRL values differ, restore the host value.
	 *
	 * For legacy IBRS, the IBRS bit always needs to be written after
	 * transitioning from a less privileged predictor mode, regardless of
	 * whether the guest/host values differ.
	 */
	if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) ||
	    vmx->spec_ctrl != hostval)
		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);

	barrier_nospec();
}
static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
	switch (to_vmx(vcpu)->exit_reason.basic) {
	case EXIT_REASON_MSR_WRITE:
		return handle_fastpath_set_msr_irqoff(vcpu);
	case EXIT_REASON_PREEMPTION_TIMER:
		return handle_fastpath_preemption_timer(vcpu);
	default:
		return EXIT_FASTPATH_NONE;
	}
}
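/*
 * Only MSR writes and preemption-timer expirations have a fastpath here;
 * every other exit reason falls back to the full handler via
 * vmx_handle_exit().
 */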
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					unsigned int flags)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	guest_state_enter_irqoff();

	/* L1D Flush includes CPU buffer clear to mitigate MDS */
	if (static_branch_unlikely(&vmx_l1d_should_flush))
		vmx_l1d_flush(vcpu);
	else if (static_branch_unlikely(&mds_user_clear))
		mds_clear_cpu_buffers();
	else if (static_branch_unlikely(&mmio_stale_data_clear) &&
		 kvm_arch_has_assigned_device(vcpu->kvm))
		mds_clear_cpu_buffers();

	vmx_disable_fb_clear(vmx);

	if (vcpu->arch.cr2 != native_read_cr2())
		native_write_cr2(vcpu->arch.cr2);

	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   flags);

	vcpu->arch.cr2 = native_read_cr2();
	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;

	vmx->idt_vectoring_info = 0;

	vmx_enable_fb_clear(vmx);

	if (unlikely(vmx->fail)) {
		vmx->exit_reason.full = 0xdead;
		goto out;
	}

	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
	if (likely(!vmx->exit_reason.failed_vmentry))
		vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);

	if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
	    is_nmi(vmx_get_intr_info(vcpu))) {
		kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
		vmx_do_nmi_irqoff();
		kvm_after_interrupt(vcpu);
	}

out:
	guest_state_exit_irqoff();
}
7271 static fastpath_t
vmx_vcpu_run(struct kvm_vcpu
*vcpu
)
7273 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
7274 unsigned long cr3
, cr4
;
7276 /* Record the guest's net vcpu time for enforced NMI injections. */
7277 if (unlikely(!enable_vnmi
&&
7278 vmx
->loaded_vmcs
->soft_vnmi_blocked
))
7279 vmx
->loaded_vmcs
->entry_time
= ktime_get();
7282 * Don't enter VMX if guest state is invalid, let the exit handler
7283 * start emulation until we arrive back to a valid state. Synthesize a
7284 * consistency check VM-Exit due to invalid guest state and bail.
7286 if (unlikely(vmx
->emulation_required
)) {
7289 vmx
->exit_reason
.full
= EXIT_REASON_INVALID_STATE
;
7290 vmx
->exit_reason
.failed_vmentry
= 1;
7291 kvm_register_mark_available(vcpu
, VCPU_EXREG_EXIT_INFO_1
);
7292 vmx
->exit_qualification
= ENTRY_FAIL_DEFAULT
;
7293 kvm_register_mark_available(vcpu
, VCPU_EXREG_EXIT_INFO_2
);
7294 vmx
->exit_intr_info
= 0;
7295 return EXIT_FASTPATH_NONE
;
7298 trace_kvm_entry(vcpu
);
7300 if (vmx
->ple_window_dirty
) {
7301 vmx
->ple_window_dirty
= false;
7302 vmcs_write32(PLE_WINDOW
, vmx
->ple_window
);
7306 * We did this in prepare_switch_to_guest, because it needs to
7307 * be within srcu_read_lock.
7309 WARN_ON_ONCE(vmx
->nested
.need_vmcs12_to_shadow_sync
);
7311 if (kvm_register_is_dirty(vcpu
, VCPU_REGS_RSP
))
7312 vmcs_writel(GUEST_RSP
, vcpu
->arch
.regs
[VCPU_REGS_RSP
]);
7313 if (kvm_register_is_dirty(vcpu
, VCPU_REGS_RIP
))
7314 vmcs_writel(GUEST_RIP
, vcpu
->arch
.regs
[VCPU_REGS_RIP
]);
7315 vcpu
->arch
.regs_dirty
= 0;
7318 * Refresh vmcs.HOST_CR3 if necessary. This must be done immediately
7319 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
7320 * it switches back to the current->mm, which can occur in KVM context
7321 * when switching to a temporary mm to patch kernel code, e.g. if KVM
7322 * toggles a static key while handling a VM-Exit.
7324 cr3
= __get_current_cr3_fast();
7325 if (unlikely(cr3
!= vmx
->loaded_vmcs
->host_state
.cr3
)) {
7326 vmcs_writel(HOST_CR3
, cr3
);
7327 vmx
->loaded_vmcs
->host_state
.cr3
= cr3
;
7330 cr4
= cr4_read_shadow();
7331 if (unlikely(cr4
!= vmx
->loaded_vmcs
->host_state
.cr4
)) {
7332 vmcs_writel(HOST_CR4
, cr4
);
7333 vmx
->loaded_vmcs
->host_state
.cr4
= cr4
;
7336 /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
7337 if (unlikely(vcpu
->arch
.switch_db_regs
& KVM_DEBUGREG_WONT_EXIT
))
7338 set_debugreg(vcpu
->arch
.dr6
, 6);
7340 /* When single-stepping over STI and MOV SS, we must clear the
7341 * corresponding interruptibility bits in the guest state. Otherwise
7342 * vmentry fails as it then expects bit 14 (BS) in pending debug
7343 * exceptions being set, but that's not correct for the guest debugging
7345 if (vcpu
->guest_debug
& KVM_GUESTDBG_SINGLESTEP
)
7346 vmx_set_interrupt_shadow(vcpu
, 0);
7348 kvm_load_guest_xsave_state(vcpu
);
7350 pt_guest_enter(vmx
);
7352 atomic_switch_perf_msrs(vmx
);
7353 if (intel_pmu_lbr_is_enabled(vcpu
))
7354 vmx_passthrough_lbr_msrs(vcpu
);
7356 if (enable_preemption_timer
)
7357 vmx_update_hv_timer(vcpu
);
7359 kvm_wait_lapic_expire(vcpu
);
7361 /* The actual VMENTER/EXIT is in the .noinstr.text section. */
7362 vmx_vcpu_enter_exit(vcpu
, __vmx_vcpu_run_flags(vmx
));
7364 /* All fields are clean at this point */
7365 if (kvm_is_using_evmcs()) {
7366 current_evmcs
->hv_clean_fields
|=
7367 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL
;
7369 current_evmcs
->hv_vp_id
= kvm_hv_get_vpindex(vcpu
);
7372 /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
7373 if (vmx
->host_debugctlmsr
)
7374 update_debugctlmsr(vmx
->host_debugctlmsr
);
7376 #ifndef CONFIG_X86_64
7378 * The sysexit path does not restore ds/es, so we must set them to
7379 * a reasonable value ourselves.
7381 * We can't defer this to vmx_prepare_switch_to_host() since that
7382 * function may be executed in interrupt context, which saves and
7383 * restore segments around it, nullifying its effect.
7385 loadsegment(ds
, __USER_DS
);
7386 loadsegment(es
, __USER_DS
);
7391 kvm_load_host_xsave_state(vcpu
);
7393 if (is_guest_mode(vcpu
)) {
7395 * Track VMLAUNCH/VMRESUME that have made past guest state
7398 if (vmx
->nested
.nested_run_pending
&&
7399 !vmx
->exit_reason
.failed_vmentry
)
7400 ++vcpu
->stat
.nested_run
;
7402 vmx
->nested
.nested_run_pending
= 0;
7405 if (unlikely(vmx
->fail
))
7406 return EXIT_FASTPATH_NONE
;
7408 if (unlikely((u16
)vmx
->exit_reason
.basic
== EXIT_REASON_MCE_DURING_VMENTRY
))
7409 kvm_machine_check();
7411 trace_kvm_exit(vcpu
, KVM_ISA_VMX
);
7413 if (unlikely(vmx
->exit_reason
.failed_vmentry
))
7414 return EXIT_FASTPATH_NONE
;
7416 vmx
->loaded_vmcs
->launched
= 1;
7418 vmx_recover_nmi_blocking(vmx
);
7419 vmx_complete_interrupts(vmx
);
7421 if (is_guest_mode(vcpu
))
7422 return EXIT_FASTPATH_NONE
;
7424 return vmx_exit_handlers_fastpath(vcpu
);
static void vmx_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_pml)
		vmx_destroy_pml_buffer(vmx);
	free_vpid(vmx->vpid);
	nested_vmx_free_vcpu(vcpu);
	free_loaded_vmcs(vmx->loaded_vmcs);
}
7438 static int vmx_vcpu_create(struct kvm_vcpu
*vcpu
)
7440 struct vmx_uret_msr
*tsx_ctrl
;
7441 struct vcpu_vmx
*vmx
;
7444 BUILD_BUG_ON(offsetof(struct vcpu_vmx
, vcpu
) != 0);
7447 INIT_LIST_HEAD(&vmx
->pi_wakeup_list
);
7451 vmx
->vpid
= allocate_vpid();
7454 * If PML is turned on, failure on enabling PML just results in failure
7455 * of creating the vcpu, therefore we can simplify PML logic (by
7456 * avoiding dealing with cases, such as enabling PML partially on vcpus
7457 * for the guest), etc.
7460 vmx
->pml_pg
= alloc_page(GFP_KERNEL_ACCOUNT
| __GFP_ZERO
);
7465 for (i
= 0; i
< kvm_nr_uret_msrs
; ++i
)
7466 vmx
->guest_uret_msrs
[i
].mask
= -1ull;
7467 if (boot_cpu_has(X86_FEATURE_RTM
)) {
7469 * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
7470 * Keep the host value unchanged to avoid changing CPUID bits
7471 * under the host kernel's feet.
7473 tsx_ctrl
= vmx_find_uret_msr(vmx
, MSR_IA32_TSX_CTRL
);
7475 tsx_ctrl
->mask
= ~(u64
)TSX_CTRL_CPUID_CLEAR
;
7478 err
= alloc_loaded_vmcs(&vmx
->vmcs01
);
7483 * Use Hyper-V 'Enlightened MSR Bitmap' feature when KVM runs as a
7484 * nested (L1) hypervisor and Hyper-V in L0 supports it. Enable the
7485 * feature only for vmcs01, KVM currently isn't equipped to realize any
7486 * performance benefits from enabling it for vmcs02.
7488 if (kvm_is_using_evmcs() &&
7489 (ms_hyperv
.nested_features
& HV_X64_NESTED_MSR_BITMAP
)) {
7490 struct hv_enlightened_vmcs
*evmcs
= (void *)vmx
->vmcs01
.vmcs
;
7492 evmcs
->hv_enlightenments_control
.msr_bitmap
= 1;
7495 /* The MSR bitmap starts with all ones */
7496 bitmap_fill(vmx
->shadow_msr_intercept
.read
, MAX_POSSIBLE_PASSTHROUGH_MSRS
);
7497 bitmap_fill(vmx
->shadow_msr_intercept
.write
, MAX_POSSIBLE_PASSTHROUGH_MSRS
);
7499 vmx_disable_intercept_for_msr(vcpu
, MSR_IA32_TSC
, MSR_TYPE_R
);
7500 #ifdef CONFIG_X86_64
7501 vmx_disable_intercept_for_msr(vcpu
, MSR_FS_BASE
, MSR_TYPE_RW
);
7502 vmx_disable_intercept_for_msr(vcpu
, MSR_GS_BASE
, MSR_TYPE_RW
);
7503 vmx_disable_intercept_for_msr(vcpu
, MSR_KERNEL_GS_BASE
, MSR_TYPE_RW
);
7505 vmx_disable_intercept_for_msr(vcpu
, MSR_IA32_SYSENTER_CS
, MSR_TYPE_RW
);
7506 vmx_disable_intercept_for_msr(vcpu
, MSR_IA32_SYSENTER_ESP
, MSR_TYPE_RW
);
7507 vmx_disable_intercept_for_msr(vcpu
, MSR_IA32_SYSENTER_EIP
, MSR_TYPE_RW
);
7508 if (kvm_cstate_in_guest(vcpu
->kvm
)) {
7509 vmx_disable_intercept_for_msr(vcpu
, MSR_CORE_C1_RES
, MSR_TYPE_R
);
7510 vmx_disable_intercept_for_msr(vcpu
, MSR_CORE_C3_RESIDENCY
, MSR_TYPE_R
);
7511 vmx_disable_intercept_for_msr(vcpu
, MSR_CORE_C6_RESIDENCY
, MSR_TYPE_R
);
7512 vmx_disable_intercept_for_msr(vcpu
, MSR_CORE_C7_RESIDENCY
, MSR_TYPE_R
);
7515 vmx
->loaded_vmcs
= &vmx
->vmcs01
;
7517 if (cpu_need_virtualize_apic_accesses(vcpu
)) {
7518 err
= kvm_alloc_apic_access_page(vcpu
->kvm
);
7523 if (enable_ept
&& !enable_unrestricted_guest
) {
7524 err
= init_rmode_identity_map(vcpu
->kvm
);
7529 if (vmx_can_use_ipiv(vcpu
))
7530 WRITE_ONCE(to_kvm_vmx(vcpu
->kvm
)->pid_table
[vcpu
->vcpu_id
],
7531 __pa(&vmx
->pi_desc
) | PID_TABLE_ENTRY_VALID
);
7536 free_loaded_vmcs(vmx
->loaded_vmcs
);
7538 vmx_destroy_pml_buffer(vmx
);
7540 free_vpid(vmx
->vpid
);
#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"
#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n"

static int vmx_vm_init(struct kvm *kvm)
{
	if (!ple_gap)
		kvm->arch.pause_in_guest = true;

	if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
		case L1TF_MITIGATION_FLUSH_NOWARN:
			/* 'I explicitly don't care' is set */
			break;
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
		case L1TF_MITIGATION_FULL:
			/*
			 * Warn upon starting the first VM in a potentially
			 * insecure environment.
			 */
			if (sched_smt_active())
				pr_warn_once(L1TF_MSG_SMT);
			if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
				pr_warn_once(L1TF_MSG_L1D);
			break;
		case L1TF_MITIGATION_FULL_FORCE:
			/* Flush is enforced */
			break;
		}
	}
	return 0;
}
static u8 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
	 * memory aliases with conflicting memory types and sometimes MCEs.
	 * We have to be careful as to what are honored and when.
	 *
	 * For MMIO, guest CD/MTRR are ignored.  The EPT memory type is set to
	 * UC.  The effective memory type is UC or WC depending on guest PAT.
	 * This was historically the source of MCEs and we want to be
	 * conservative.
	 *
	 * When there is no need to deal with noncoherent DMA (e.g., no VT-d
	 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored.  The
	 * EPT memory type is set to WB.  The effective memory type is forced
	 * WB.
	 *
	 * Otherwise, we trust guest.  Guest CD/MTRR/PAT are all honored.  The
	 * EPT memory type is used to emulate guest CD/MTRR.
	 */

	if (is_mmio)
		return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;

	if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;

	if (kvm_read_cr0_bits(vcpu, X86_CR0_CD)) {
		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
			return MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT;
		else
			return (MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT) |
				VMX_EPT_IPAT_BIT;
	}

	return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
}
static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
{
	/*
	 * These bits in the secondary execution controls field
	 * are dynamic, the others are mostly based on the hypervisor
	 * architecture and the guest's CPUID.  Do not touch the
	 * dynamic bits.
	 */
	u32 mask =
		SECONDARY_EXEC_SHADOW_VMCS |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_DESC;

	u32 cur_ctl = secondary_exec_controls_get(vmx);

	secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
}
7635 * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
7636 * (indicating "allowed-1") if they are supported in the guest's CPUID.
7638 static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu
*vcpu
)
7640 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
7641 struct kvm_cpuid_entry2
*entry
;
7643 vmx
->nested
.msrs
.cr0_fixed1
= 0xffffffff;
7644 vmx
->nested
.msrs
.cr4_fixed1
= X86_CR4_PCE
;
7646 #define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
7647 if (entry && (entry->_reg & (_cpuid_mask))) \
7648 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7651 entry
= kvm_find_cpuid_entry(vcpu
, 0x1);
7652 cr4_fixed1_update(X86_CR4_VME
, edx
, feature_bit(VME
));
7653 cr4_fixed1_update(X86_CR4_PVI
, edx
, feature_bit(VME
));
7654 cr4_fixed1_update(X86_CR4_TSD
, edx
, feature_bit(TSC
));
7655 cr4_fixed1_update(X86_CR4_DE
, edx
, feature_bit(DE
));
7656 cr4_fixed1_update(X86_CR4_PSE
, edx
, feature_bit(PSE
));
7657 cr4_fixed1_update(X86_CR4_PAE
, edx
, feature_bit(PAE
));
7658 cr4_fixed1_update(X86_CR4_MCE
, edx
, feature_bit(MCE
));
7659 cr4_fixed1_update(X86_CR4_PGE
, edx
, feature_bit(PGE
));
7660 cr4_fixed1_update(X86_CR4_OSFXSR
, edx
, feature_bit(FXSR
));
7661 cr4_fixed1_update(X86_CR4_OSXMMEXCPT
, edx
, feature_bit(XMM
));
7662 cr4_fixed1_update(X86_CR4_VMXE
, ecx
, feature_bit(VMX
));
7663 cr4_fixed1_update(X86_CR4_SMXE
, ecx
, feature_bit(SMX
));
7664 cr4_fixed1_update(X86_CR4_PCIDE
, ecx
, feature_bit(PCID
));
7665 cr4_fixed1_update(X86_CR4_OSXSAVE
, ecx
, feature_bit(XSAVE
));
7667 entry
= kvm_find_cpuid_entry_index(vcpu
, 0x7, 0);
7668 cr4_fixed1_update(X86_CR4_FSGSBASE
, ebx
, feature_bit(FSGSBASE
));
7669 cr4_fixed1_update(X86_CR4_SMEP
, ebx
, feature_bit(SMEP
));
7670 cr4_fixed1_update(X86_CR4_SMAP
, ebx
, feature_bit(SMAP
));
7671 cr4_fixed1_update(X86_CR4_PKE
, ecx
, feature_bit(PKU
));
7672 cr4_fixed1_update(X86_CR4_UMIP
, ecx
, feature_bit(UMIP
));
7673 cr4_fixed1_update(X86_CR4_LA57
, ecx
, feature_bit(LA57
));
7675 entry
= kvm_find_cpuid_entry_index(vcpu
, 0x7, 1);
7676 cr4_fixed1_update(X86_CR4_LAM_SUP
, eax
, feature_bit(LAM
));
7678 #undef cr4_fixed1_update
7681 static void update_intel_pt_cfg(struct kvm_vcpu
*vcpu
)
7683 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
7684 struct kvm_cpuid_entry2
*best
= NULL
;
7687 for (i
= 0; i
< PT_CPUID_LEAVES
; i
++) {
7688 best
= kvm_find_cpuid_entry_index(vcpu
, 0x14, i
);
7691 vmx
->pt_desc
.caps
[CPUID_EAX
+ i
*PT_CPUID_REGS_NUM
] = best
->eax
;
7692 vmx
->pt_desc
.caps
[CPUID_EBX
+ i
*PT_CPUID_REGS_NUM
] = best
->ebx
;
7693 vmx
->pt_desc
.caps
[CPUID_ECX
+ i
*PT_CPUID_REGS_NUM
] = best
->ecx
;
7694 vmx
->pt_desc
.caps
[CPUID_EDX
+ i
*PT_CPUID_REGS_NUM
] = best
->edx
;
7697 /* Get the number of configurable Address Ranges for filtering */
7698 vmx
->pt_desc
.num_address_ranges
= intel_pt_validate_cap(vmx
->pt_desc
.caps
,
7699 PT_CAP_num_address_ranges
);
7701 /* Initialize and clear the no dependency bits */
7702 vmx
->pt_desc
.ctl_bitmask
= ~(RTIT_CTL_TRACEEN
| RTIT_CTL_OS
|
7703 RTIT_CTL_USR
| RTIT_CTL_TSC_EN
| RTIT_CTL_DISRETC
|
7704 RTIT_CTL_BRANCH_EN
);
7707 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise
7708 * will inject an #GP
7710 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_cr3_filtering
))
7711 vmx
->pt_desc
.ctl_bitmask
&= ~RTIT_CTL_CR3EN
;
7714 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
7715 * PSBFreq can be set
7717 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_psb_cyc
))
7718 vmx
->pt_desc
.ctl_bitmask
&= ~(RTIT_CTL_CYCLEACC
|
7719 RTIT_CTL_CYC_THRESH
| RTIT_CTL_PSB_FREQ
);
7722 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn and MTCFreq can be set
7724 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_mtc
))
7725 vmx
->pt_desc
.ctl_bitmask
&= ~(RTIT_CTL_MTC_EN
|
7726 RTIT_CTL_MTC_RANGE
);
7728 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
7729 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_ptwrite
))
7730 vmx
->pt_desc
.ctl_bitmask
&= ~(RTIT_CTL_FUP_ON_PTW
|
7733 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
7734 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_power_event_trace
))
7735 vmx
->pt_desc
.ctl_bitmask
&= ~RTIT_CTL_PWR_EVT_EN
;
7737 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
7738 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_topa_output
))
7739 vmx
->pt_desc
.ctl_bitmask
&= ~RTIT_CTL_TOPA
;
7741 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
7742 if (intel_pt_validate_cap(vmx
->pt_desc
.caps
, PT_CAP_output_subsys
))
7743 vmx
->pt_desc
.ctl_bitmask
&= ~RTIT_CTL_FABRIC_EN
;
7745 /* unmask address range configure area */
7746 for (i
= 0; i
< vmx
->pt_desc
.num_address_ranges
; i
++)
7747 vmx
->pt_desc
.ctl_bitmask
&= ~(0xfULL
<< (32 + i
* 4));
7750 static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu
*vcpu
)
7752 struct vcpu_vmx
*vmx
= to_vmx(vcpu
);
7755 * XSAVES is effectively enabled if and only if XSAVE is also exposed
7756 * to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
7757 * set if and only if XSAVE is supported.
7759 if (boot_cpu_has(X86_FEATURE_XSAVE
) &&
7760 guest_cpuid_has(vcpu
, X86_FEATURE_XSAVE
))
7761 kvm_governed_feature_check_and_set(vcpu
, X86_FEATURE_XSAVES
);
7763 kvm_governed_feature_check_and_set(vcpu
, X86_FEATURE_VMX
);
7764 kvm_governed_feature_check_and_set(vcpu
, X86_FEATURE_LAM
);
7766 vmx_setup_uret_msrs(vmx
);
7768 if (cpu_has_secondary_exec_ctrls())
7769 vmcs_set_secondary_exec_control(vmx
,
7770 vmx_secondary_exec_control(vmx
));
7772 if (guest_can_use(vcpu
, X86_FEATURE_VMX
))
7773 vmx
->msr_ia32_feature_control_valid_bits
|=
7774 FEAT_CTL_VMX_ENABLED_INSIDE_SMX
|
7775 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX
;
7777 vmx
->msr_ia32_feature_control_valid_bits
&=
7778 ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX
|
7779 FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX
);
7781 if (guest_can_use(vcpu
, X86_FEATURE_VMX
))
7782 nested_vmx_cr_fixed1_bits_update(vcpu
);
7784 if (boot_cpu_has(X86_FEATURE_INTEL_PT
) &&
7785 guest_cpuid_has(vcpu
, X86_FEATURE_INTEL_PT
))
7786 update_intel_pt_cfg(vcpu
);
7788 if (boot_cpu_has(X86_FEATURE_RTM
)) {
7789 struct vmx_uret_msr
*msr
;
7790 msr
= vmx_find_uret_msr(vmx
, MSR_IA32_TSX_CTRL
);
7792 bool enabled
= guest_cpuid_has(vcpu
, X86_FEATURE_RTM
);
7793 vmx_set_guest_uret_msr(vmx
, msr
, enabled
? 0 : TSX_CTRL_RTM_DISABLE
);
7797 if (kvm_cpu_cap_has(X86_FEATURE_XFD
))
7798 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_XFD_ERR
, MSR_TYPE_R
,
7799 !guest_cpuid_has(vcpu
, X86_FEATURE_XFD
));
7801 if (boot_cpu_has(X86_FEATURE_IBPB
))
7802 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_PRED_CMD
, MSR_TYPE_W
,
7803 !guest_has_pred_cmd_msr(vcpu
));
7805 if (boot_cpu_has(X86_FEATURE_FLUSH_L1D
))
7806 vmx_set_intercept_for_msr(vcpu
, MSR_IA32_FLUSH_CMD
, MSR_TYPE_W
,
7807 !guest_cpuid_has(vcpu
, X86_FEATURE_FLUSH_L1D
));
7809 set_cr4_guest_host_mask(vmx
);
7811 vmx_write_encls_bitmap(vcpu
, NULL
);
7812 if (guest_cpuid_has(vcpu
, X86_FEATURE_SGX
))
7813 vmx
->msr_ia32_feature_control_valid_bits
|= FEAT_CTL_SGX_ENABLED
;
7815 vmx
->msr_ia32_feature_control_valid_bits
&= ~FEAT_CTL_SGX_ENABLED
;
7817 if (guest_cpuid_has(vcpu
, X86_FEATURE_SGX_LC
))
7818 vmx
->msr_ia32_feature_control_valid_bits
|=
7819 FEAT_CTL_SGX_LC_ENABLED
;
7821 vmx
->msr_ia32_feature_control_valid_bits
&=
7822 ~FEAT_CTL_SGX_LC_ENABLED
;
7824 /* Refresh #PF interception to account for MAXPHYADDR changes. */
7825 vmx_update_exception_bitmap(vcpu
);
7828 static u64
vmx_get_perf_capabilities(void)
7830 u64 perf_cap
= PMU_CAP_FW_WRITES
;
7831 struct x86_pmu_lbr lbr
;
7832 u64 host_perf_cap
= 0;
7837 if (boot_cpu_has(X86_FEATURE_PDCM
))
7838 rdmsrl(MSR_IA32_PERF_CAPABILITIES
, host_perf_cap
);
7840 if (!cpu_feature_enabled(X86_FEATURE_ARCH_LBR
)) {
7841 x86_perf_get_lbr(&lbr
);
7843 perf_cap
|= host_perf_cap
& PMU_CAP_LBR_FMT
;
7846 if (vmx_pebs_supported()) {
7847 perf_cap
|= host_perf_cap
& PERF_CAP_PEBS_MASK
;
7848 if ((perf_cap
& PERF_CAP_PEBS_FORMAT
) < 4)
7849 perf_cap
&= ~PERF_CAP_PEBS_BASELINE
;
static __init void vmx_set_cpu_caps(void)
{
        kvm_set_cpu_caps();

        /* CPUID 0x1 */
        if (nested)
                kvm_cpu_cap_set(X86_FEATURE_VMX);

        /* CPUID 0x7 */
        if (kvm_mpx_supported())
                kvm_cpu_cap_check_and_set(X86_FEATURE_MPX);
        if (!cpu_has_vmx_invpcid())
                kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
        if (vmx_pt_mode_is_host_guest())
                kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
        if (vmx_pebs_supported()) {
                kvm_cpu_cap_check_and_set(X86_FEATURE_DS);
                kvm_cpu_cap_check_and_set(X86_FEATURE_DTES64);
        }

        if (!enable_pmu)
                kvm_cpu_cap_clear(X86_FEATURE_PDCM);
        kvm_caps.supported_perf_cap = vmx_get_perf_capabilities();

        if (!enable_sgx) {
                kvm_cpu_cap_clear(X86_FEATURE_SGX);
                kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
                kvm_cpu_cap_clear(X86_FEATURE_SGX1);
                kvm_cpu_cap_clear(X86_FEATURE_SGX2);
        }

        if (vmx_umip_emulated())
                kvm_cpu_cap_set(X86_FEATURE_UMIP);

        /* CPUID 0xD.1 */
        kvm_caps.supported_xss = 0;
        if (!cpu_has_vmx_xsaves())
                kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

        /* CPUID 0x80000001 and 0x7 (RDPID) */
        if (!cpu_has_vmx_rdtscp()) {
                kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
                kvm_cpu_cap_clear(X86_FEATURE_RDPID);
        }

        if (cpu_has_vmx_waitpkg())
                kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
}
static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
{
        to_vmx(vcpu)->req_immediate_exit = true;
}
static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
                                  struct x86_instruction_info *info)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        unsigned short port;
        bool intercept;
        int size;

        if (info->intercept == x86_intercept_in ||
            info->intercept == x86_intercept_ins) {
                port = info->src_val;
                size = info->dst_bytes;
        } else {
                port = info->dst_val;
                size = info->src_bytes;
        }

        /*
         * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
         * VM-exits depend on the 'unconditional IO exiting' VM-execution
         * control.
         *
         * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
         */
        if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
                intercept = nested_cpu_has(vmcs12,
                                           CPU_BASED_UNCOND_IO_EXITING);
        else
                intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);

        /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
        return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage,
                               struct x86_exception *exception)
{
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

        switch (info->intercept) {
        /*
         * RDPID causes #UD if disabled through secondary execution controls.
         * Because it is marked as EmulateOnUD, we need to intercept it here.
         * Note, RDPID is hidden behind ENABLE_RDTSCP.
         */
        case x86_intercept_rdpid:
                if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
                        exception->vector = UD_VECTOR;
                        exception->error_code_valid = false;
                        return X86EMUL_PROPAGATE_FAULT;
                }
                break;

        case x86_intercept_in:
        case x86_intercept_ins:
        case x86_intercept_out:
        case x86_intercept_outs:
                return vmx_check_intercept_io(vcpu, info);

        case x86_intercept_lgdt:
        case x86_intercept_lidt:
        case x86_intercept_lldt:
        case x86_intercept_ltr:
        case x86_intercept_sgdt:
        case x86_intercept_sidt:
        case x86_intercept_sldt:
        case x86_intercept_str:
                if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
                        return X86EMUL_CONTINUE;

                /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
                break;

        case x86_intercept_pause:
                /*
                 * PAUSE is a single-byte NOP with a REPE prefix, i.e. collides
                 * with vanilla NOPs in the emulator.  Apply the interception
                 * check only to actual PAUSE instructions.  Don't check
                 * PAUSE-loop-exiting, software can't expect a given PAUSE to
                 * exit, i.e. KVM is within its rights to allow L2 to execute
                 * the PAUSE.
                 */
                if ((info->rep_prefix != REPE_PREFIX) ||
                    !nested_cpu_has2(vmcs12, CPU_BASED_PAUSE_EXITING))
                        return X86EMUL_CONTINUE;

                break;

        /* TODO: check more intercepts... */
        default:
                break;
        }

        return X86EMUL_UNHANDLEABLE;
}
#ifdef CONFIG_X86_64
/* (a << shift) / divisor, return 1 if overflow otherwise 0 */
static inline int u64_shl_div_u64(u64 a, unsigned int shift,
                                  u64 divisor, u64 *result)
{
        u64 low = a << shift, high = a >> (64 - shift);

        /* To avoid the overflow on divq */
        if (high >= divisor)
                return 1;

        /* Low hold the result, high hold rem which is discarded */
        asm("divq %2\n\t" : "=a" (low), "=d" (high) :
            "rm" (divisor), "0" (low), "1" (high));
        *result = low;

        return 0;
}
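
/*
 * Worked example (illustrative values, not taken from any particular
 * workload): u64_shl_div_u64(a = 3, shift = 10, divisor = 4, &res) forms the
 * 128-bit value high:low = 0:3072 and divides it by 4, so *result = 768 and
 * the helper returns 0.  If instead the high 64 bits were >= the divisor, the
 * quotient would not fit in 64 bits and the helper returns 1 without dividing.
 */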
static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
                            bool *expired)
{
        struct vcpu_vmx *vmx;
        u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
        struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;

        vmx = to_vmx(vcpu);
        tscl = rdtsc();
        guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
        delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
        lapic_timer_advance_cycles = nsec_to_cycles(vcpu,
                                                    ktimer->timer_advance_ns);

        if (delta_tsc > lapic_timer_advance_cycles)
                delta_tsc -= lapic_timer_advance_cycles;
        else
                delta_tsc = 0;

        /* Convert to host delta tsc if tsc scaling is enabled */
        if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
            delta_tsc && u64_shl_div_u64(delta_tsc,
                                kvm_caps.tsc_scaling_ratio_frac_bits,
                                vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
                return -ERANGE;

        /*
         * If the delta tsc can't fit in the 32 bit after the multi shift,
         * we can't use the preemption timer.
         * It's possible that it fits on later vmentries, but checking
         * on every vmentry is costly so we just use an hrtimer.
         */
        if (delta_tsc >> (cpu_preemption_timer_multi + 32))
                return -ERANGE;

        vmx->hv_deadline_tsc = tscl + delta_tsc;
        *expired = !delta_tsc;
        return 0;
}
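
/*
 * Illustrative numbers for the scaling conversion above (hypothetical, for
 * clarity only): with tsc_scaling_ratio_frac_bits == 48 and an L1 scaling
 * ratio of 2 << 48 (the guest TSC advances twice as fast as the host TSC), a
 * guest-relative delta of 1000 cycles becomes (1000 << 48) / (2 << 48) = 500
 * host cycles, which is the delta added to the current host TSC to form
 * hv_deadline_tsc.
 */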
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
        to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif
static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
        if (!kvm_pause_in_guest(vcpu->kvm))
                shrink_ple_window(vcpu);
}
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        if (WARN_ON_ONCE(!enable_pml))
                return;

        if (is_guest_mode(vcpu)) {
                vmx->nested.update_vmcs01_cpu_dirty_logging = true;
                return;
        }

        /*
         * Note, nr_memslots_dirty_logging can be changed concurrent with this
         * code, but in that case another update request will be made and so
         * the guest will never run with a stale PML value.
         */
        if (atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
                secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
        else
                secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
}
static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.mcg_cap & MCG_LMCE_P)
                to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
                        FEAT_CTL_LMCE_ENABLED;
        else
                to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
                        ~FEAT_CTL_LMCE_ENABLED;
}
#ifdef CONFIG_KVM_SMM
static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
        /* we need a nested vmexit to enter SMM, postpone if run is pending */
        if (to_vmx(vcpu)->nested.nested_run_pending)
                return -EBUSY;

        return !is_smm(vcpu);
}
static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);

        /*
         * TODO: Implement custom flows for forcing the vCPU out/in of L2 on
         * SMI and RSM.  Using the common VM-Exit + VM-Enter routines is wrong
         * as SMI and RSM only modify state that is saved and restored via
         * SMRAM.  E.g. most MSRs are left untouched, but many are modified by
         * VM-Exit and VM-Enter, and thus L2's values may be corrupted on
         * SMI+RSM.
         */
        vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
        if (vmx->nested.smm.guest_mode)
                nested_vmx_vmexit(vcpu, -1, 0, 0);

        vmx->nested.smm.vmxon = vmx->nested.vmxon;
        vmx->nested.vmxon = false;
        vmx_clear_hlt(vcpu);
        return 0;
}
static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
{
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;

        if (vmx->nested.smm.vmxon) {
                vmx->nested.vmxon = true;
                vmx->nested.smm.vmxon = false;
        }

        if (vmx->nested.smm.guest_mode) {
                ret = nested_vmx_enter_non_root_mode(vcpu, false);
                if (ret)
                        return ret;

                vmx->nested.nested_run_pending = 1;
                vmx->nested.smm.guest_mode = false;
        }
        return 0;
}

static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
{
        /* RSM will cause a vmexit anyway.  */
}
#endif
static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
        return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu);
}
static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
{
        if (is_guest_mode(vcpu)) {
                struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;

                if (hrtimer_try_to_cancel(timer) == 1)
                        hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
        }
}
static void vmx_hardware_unsetup(void)
{
        kvm_set_posted_intr_wakeup_handler(NULL);

        if (nested)
                nested_vmx_hardware_unsetup();

        free_kvm_area();
}
#define VMX_REQUIRED_APICV_INHIBITS                             \
(                                                               \
        BIT(APICV_INHIBIT_REASON_DISABLE) |                     \
        BIT(APICV_INHIBIT_REASON_ABSENT) |                      \
        BIT(APICV_INHIBIT_REASON_HYPERV) |                      \
        BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |                    \
        BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |         \
        BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |            \
        BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED)            \
)
static void vmx_vm_destroy(struct kvm *kvm)
{
        struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);

        free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
}
/*
 * Note, the SDM states that the linear address is masked *after* the modified
 * canonicality check, whereas KVM masks (untags) the address and then performs
 * a "normal" canonicality check.  Functionally, the two methods are identical,
 * and when the masking occurs relative to the canonicality check isn't visible
 * to software, i.e. KVM's behavior doesn't violate the SDM.
 */
gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags)
{
        int lam_bit;
        unsigned long cr3_bits;

        if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG))
                return gva;

        if (!is_64_bit_mode(vcpu))
                return gva;

        /*
         * Bit 63 determines if the address should be treated as user address
         * or a supervisor address.
         */
        if (!(gva & BIT_ULL(63))) {
                cr3_bits = kvm_get_active_cr3_lam_bits(vcpu);
                if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48)))
                        return gva;

                /* LAM_U48 is ignored if LAM_U57 is set. */
                lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47;
        } else {
                if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP))
                        return gva;

                lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47;
        }

        /*
         * Untag the address by sign-extending the lam_bit, but NOT to bit 63.
         * Bit 63 is retained from the raw virtual address so that untagging
         * doesn't change a user access to a supervisor access, and vice versa.
         */
        return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63));
}
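
/*
 * Worked example of the untagging math (hypothetical address, for clarity
 * only): with LAM_U57 active, lam_bit is 56, so a tagged user pointer such as
 * 0x7e00000000001234 (metadata 0x3f in bits 62:57, bit 56 clear) untags to
 * 0x0000000000001234: bits 62:57 become copies of bit 56 via sign_extend64(),
 * while bit 63 is preserved from the raw address.
 */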
static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .name = KBUILD_MODNAME,

        .check_processor_compatibility = vmx_check_processor_compat,

        .hardware_unsetup = vmx_hardware_unsetup,

        .hardware_enable = vmx_hardware_enable,
        .hardware_disable = vmx_hardware_disable,
        .has_emulated_msr = vmx_has_emulated_msr,

        .vm_size = sizeof(struct kvm_vmx),
        .vm_init = vmx_vm_init,
        .vm_destroy = vmx_vm_destroy,

        .vcpu_precreate = vmx_vcpu_precreate,
        .vcpu_create = vmx_vcpu_create,
        .vcpu_free = vmx_vcpu_free,
        .vcpu_reset = vmx_vcpu_reset,

        .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
        .vcpu_load = vmx_vcpu_load,
        .vcpu_put = vmx_vcpu_put,

        .update_exception_bitmap = vmx_update_exception_bitmap,
        .get_msr_feature = vmx_get_msr_feature,
        .get_msr = vmx_get_msr,
        .set_msr = vmx_set_msr,
        .get_segment_base = vmx_get_segment_base,
        .get_segment = vmx_get_segment,
        .set_segment = vmx_set_segment,
        .get_cpl = vmx_get_cpl,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
        .is_valid_cr0 = vmx_is_valid_cr0,
        .set_cr0 = vmx_set_cr0,
        .is_valid_cr4 = vmx_is_valid_cr4,
        .set_cr4 = vmx_set_cr4,
        .set_efer = vmx_set_efer,
        .get_idt = vmx_get_idt,
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
        .set_dr7 = vmx_set_dr7,
        .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
        .get_if_flag = vmx_get_if_flag,

        .flush_tlb_all = vmx_flush_tlb_all,
        .flush_tlb_current = vmx_flush_tlb_current,
        .flush_tlb_gva = vmx_flush_tlb_gva,
        .flush_tlb_guest = vmx_flush_tlb_guest,

        .vcpu_pre_run = vmx_vcpu_pre_run,
        .vcpu_run = vmx_vcpu_run,
        .handle_exit = vmx_handle_exit,
        .skip_emulated_instruction = vmx_skip_emulated_instruction,
        .update_emulated_instruction = vmx_update_emulated_instruction,
        .set_interrupt_shadow = vmx_set_interrupt_shadow,
        .get_interrupt_shadow = vmx_get_interrupt_shadow,
        .patch_hypercall = vmx_patch_hypercall,
        .inject_irq = vmx_inject_irq,
        .inject_nmi = vmx_inject_nmi,
        .inject_exception = vmx_inject_exception,
        .cancel_injection = vmx_cancel_injection,
        .interrupt_allowed = vmx_interrupt_allowed,
        .nmi_allowed = vmx_nmi_allowed,
        .get_nmi_mask = vmx_get_nmi_mask,
        .set_nmi_mask = vmx_set_nmi_mask,
        .enable_nmi_window = vmx_enable_nmi_window,
        .enable_irq_window = vmx_enable_irq_window,
        .update_cr8_intercept = vmx_update_cr8_intercept,
        .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
        .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
        .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
        .apicv_pre_state_restore = vmx_apicv_pre_state_restore,
        .required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
        .hwapic_irr_update = vmx_hwapic_irr_update,
        .hwapic_isr_update = vmx_hwapic_isr_update,
        .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
        .sync_pir_to_irr = vmx_sync_pir_to_irr,
        .deliver_interrupt = vmx_deliver_interrupt,
        .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,

        .set_tss_addr = vmx_set_tss_addr,
        .set_identity_map_addr = vmx_set_identity_map_addr,
        .get_mt_mask = vmx_get_mt_mask,

        .get_exit_info = vmx_get_exit_info,

        .vcpu_after_set_cpuid = vmx_vcpu_after_set_cpuid,

        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,

        .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
        .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
        .write_tsc_offset = vmx_write_tsc_offset,
        .write_tsc_multiplier = vmx_write_tsc_multiplier,

        .load_mmu_pgd = vmx_load_mmu_pgd,

        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,

        .request_immediate_exit = vmx_request_immediate_exit,

        .sched_in = vmx_sched_in,

        .cpu_dirty_log_size = PML_ENTITY_NUM,
        .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,

        .nested_ops = &vmx_nested_ops,

        .pi_update_irte = vmx_pi_update_irte,
        .pi_start_assignment = vmx_pi_start_assignment,

#ifdef CONFIG_X86_64
        .set_hv_timer = vmx_set_hv_timer,
        .cancel_hv_timer = vmx_cancel_hv_timer,
#endif

        .setup_mce = vmx_setup_mce,

#ifdef CONFIG_KVM_SMM
        .smi_allowed = vmx_smi_allowed,
        .enter_smm = vmx_enter_smm,
        .leave_smm = vmx_leave_smm,
        .enable_smi_window = vmx_enable_smi_window,
#endif

        .check_emulate_instruction = vmx_check_emulate_instruction,
        .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
        .migrate_timers = vmx_migrate_timers,

        .msr_filter_changed = vmx_msr_filter_changed,
        .complete_emulated_msr = kvm_complete_insn_gp,

        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,

        .get_untagged_addr = vmx_get_untagged_addr,
};
static unsigned int vmx_handle_intel_pt_intr(void)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

        /* '0' on failure so that the !PT case can use a RET0 static call. */
        if (!vcpu || !kvm_handling_nmi_from_guest(vcpu))
                return 0;

        kvm_make_request(KVM_REQ_PMI, vcpu);
        __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
                  (unsigned long *)&vcpu->arch.pmu.global_status);
        return 1;
}
static __init void vmx_setup_user_return_msrs(void)
{
        /*
         * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
         * will emulate SYSCALL in legacy mode if the vendor string in guest
         * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
         * support this emulation, MSR_STAR is included in the list for i386,
         * but is never loaded into hardware.  MSR_CSTAR is also never loaded
         * into hardware and is here purely for emulation purposes.
         */
        const u32 vmx_uret_msrs_list[] = {
        #ifdef CONFIG_X86_64
                MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
        #endif
                MSR_EFER, MSR_TSC_AUX, MSR_STAR,
                MSR_IA32_TSX_CTRL,
        };
        int i;

        BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);

        for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
                kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
}
static void __init vmx_setup_me_spte_mask(void)
{
        u64 me_mask = 0;

        /*
         * kvm_get_shadow_phys_bits() returns shadow_phys_bits.  Use
         * the former to avoid exposing shadow_phys_bits.
         *
         * On pre-MKTME system, boot_cpu_data.x86_phys_bits equals to
         * shadow_phys_bits.  On MKTME and/or TDX capable systems,
         * boot_cpu_data.x86_phys_bits holds the actual physical address
         * w/o the KeyID bits, and shadow_phys_bits equals to MAXPHYADDR
         * reported by CPUID.  Those bits between are KeyID bits.
         */
        if (boot_cpu_data.x86_phys_bits != kvm_get_shadow_phys_bits())
                me_mask = rsvd_bits(boot_cpu_data.x86_phys_bits,
                                    kvm_get_shadow_phys_bits() - 1);
        /*
         * Unlike SME, host kernel doesn't support setting up any
         * MKTME KeyID on Intel platforms.  No memory encryption
         * bits should be included into the SPTE.
         */
        kvm_mmu_set_me_spte_mask(0, me_mask);
}
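
/*
 * Illustrative numbers (hypothetical platform, for clarity only): on an
 * MKTME-capable part where boot_cpu_data.x86_phys_bits is 46 and the
 * CPUID-reported MAXPHYADDR is 52, bits 51:46 are KeyID bits, so me_mask
 * becomes rsvd_bits(46, 51) and those bits are treated as reserved in SPTEs
 * rather than as memory-encryption bits.
 */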
static struct kvm_x86_init_ops vmx_init_ops __initdata;
static __init int hardware_setup(void)
{
        unsigned long host_bndcfgs;
        struct desc_ptr dt;
        int r;

        store_idt(&dt);
        host_idt_base = dt.address;

        vmx_setup_user_return_msrs();

        if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
                return -EIO;

        if (cpu_has_perf_global_ctrl_bug())
                pr_warn_once("VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
                             "does not work properly. Using workaround\n");

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_MPX)) {
                rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
                WARN_ONCE(host_bndcfgs, "BNDCFGS in host will be lost");
        }

        if (!cpu_has_vmx_mpx())
                kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
                                             XFEATURE_MASK_BNDCSR);

        if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
            !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
                enable_vpid = 0;

        if (!cpu_has_vmx_ept() ||
            !cpu_has_vmx_ept_4levels() ||
            !cpu_has_vmx_ept_mt_wb() ||
            !cpu_has_vmx_invept_global())
                enable_ept = 0;

        /* NX support is required for shadow paging. */
        if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
                pr_err_ratelimited("NX (Execute Disable) not supported\n");
                return -EOPNOTSUPP;
        }

        if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
                enable_ept_ad_bits = 0;

        if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
                enable_unrestricted_guest = 0;

        if (!cpu_has_vmx_flexpriority())
                flexpriority_enabled = 0;

        if (!cpu_has_virtual_nmis())
                enable_vnmi = 0;

#ifdef CONFIG_X86_SGX_KVM
        if (!cpu_has_vmx_encls_vmexit())
                enable_sgx = false;
#endif

        /*
         * set_apic_access_page_addr() is used to reload apic access
         * page upon invalidation.  No need to do anything if not
         * using the APIC_ACCESS_ADDR VMCS field.
         */
        if (!flexpriority_enabled)
                vmx_x86_ops.set_apic_access_page_addr = NULL;

        if (!cpu_has_vmx_tpr_shadow())
                vmx_x86_ops.update_cr8_intercept = NULL;

#if IS_ENABLED(CONFIG_HYPERV)
        if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
            && enable_ept) {
                vmx_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
                vmx_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
        }
#endif

        if (!cpu_has_vmx_ple()) {
                ple_gap = 0;
                ple_window = 0;
                ple_window_grow = 0;
                ple_window_max = 0;
                ple_window_shrink = 0;
        }

        if (!cpu_has_vmx_apicv())
                enable_apicv = 0;
        if (!enable_apicv)
                vmx_x86_ops.sync_pir_to_irr = NULL;

        if (!enable_apicv || !cpu_has_vmx_ipiv())
                enable_ipiv = false;

        if (cpu_has_vmx_tsc_scaling())
                kvm_caps.has_tsc_control = true;

        kvm_caps.max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
        kvm_caps.tsc_scaling_ratio_frac_bits = 48;
        kvm_caps.has_bus_lock_exit = cpu_has_vmx_bus_lock_detection();
        kvm_caps.has_notify_vmexit = cpu_has_notify_vmexit();

        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

        if (enable_ept)
                kvm_mmu_set_ept_masks(enable_ept_ad_bits,
                                      cpu_has_vmx_ept_execute_only());

        /*
         * Setup shadow_me_value/shadow_me_mask to include MKTME KeyID
         * bits to shadow_zero_check.
         */
        vmx_setup_me_spte_mask();

        kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
                          ept_caps_to_lpage_level(vmx_capability.ept));

        /*
         * Only enable PML when hardware supports PML feature, and both EPT
         * and EPT A/D bit features are enabled -- PML depends on them to work.
         */
        if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
                enable_pml = 0;

        if (!enable_pml)
                vmx_x86_ops.cpu_dirty_log_size = 0;

        if (!cpu_has_vmx_preemption_timer())
                enable_preemption_timer = false;

        if (enable_preemption_timer) {
                u64 use_timer_freq = 5000ULL * 1000 * 1000;

                cpu_preemption_timer_multi =
                        vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;

                if (tsc_khz)
                        use_timer_freq = (u64)tsc_khz * 1000;
                use_timer_freq >>= cpu_preemption_timer_multi;

                /*
                 * KVM "disables" the preemption timer by setting it to its max
                 * value.  Don't use the timer if it might cause spurious exits
                 * at a rate faster than 0.1 Hz (of uninterrupted guest time).
                 */
                if (use_timer_freq > 0xffffffffu / 10)
                        enable_preemption_timer = false;
        }
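
        /*
         * Illustrative arithmetic (hypothetical host, for clarity only): with
         * tsc_khz = 3000000 (a 3 GHz TSC) and a preemption timer rate of 5
         * (the timer ticks once every 32 TSC cycles), use_timer_freq ends up
         * at 3e9 >> 5, roughly 93.75 MHz, well under the 0xffffffff / 10
         * (~429 MHz) cutoff, so the preemption timer remains enabled.
         */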
        if (!enable_preemption_timer) {
                vmx_x86_ops.set_hv_timer = NULL;
                vmx_x86_ops.cancel_hv_timer = NULL;
                vmx_x86_ops.request_immediate_exit = __kvm_request_immediate_exit;
        }

        kvm_caps.supported_mce_cap |= MCG_LMCE_P;
        kvm_caps.supported_mce_cap |= MCG_CMCI_P;

        if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
                return -EINVAL;
        if (!enable_ept || !enable_pmu || !cpu_has_vmx_intel_pt())
                pt_mode = PT_MODE_SYSTEM;
        if (pt_mode == PT_MODE_HOST_GUEST)
                vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
        else
                vmx_init_ops.handle_intel_pt_intr = NULL;

        setup_default_sgx_lepubkeyhash();

        if (nested) {
                nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);

                r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
                if (r)
                        return r;
        }

        vmx_set_cpu_caps();

        r = alloc_kvm_area();
        if (r && nested)
                nested_vmx_hardware_unsetup();

        kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);

        return r;
}
static struct kvm_x86_init_ops vmx_init_ops __initdata = {
        .hardware_setup = hardware_setup,
        .handle_intel_pt_intr = NULL,

        .runtime_ops = &vmx_x86_ops,
        .pmu_ops = &intel_pmu_ops,
};
static void vmx_cleanup_l1d_flush(void)
{
        if (vmx_l1d_flush_pages) {
                free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
                vmx_l1d_flush_pages = NULL;
        }
        /* Restore state so sysfs ignores VMX */
        l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
}
static void __vmx_exit(void)
{
        allow_smaller_maxphyaddr = false;

        cpu_emergency_unregister_virt_callback(vmx_emergency_disable);

        vmx_cleanup_l1d_flush();
}

static void vmx_exit(void)
{
        kvm_exit();
        kvm_x86_vendor_exit();

        __vmx_exit();
}
module_exit(vmx_exit);
static int __init vmx_init(void)
{
        int r, cpu;

        if (!kvm_is_vmx_supported())
                return -EOPNOTSUPP;

        /*
         * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
         * to unwind if a later step fails.
         */
        hv_init_evmcs();

        r = kvm_x86_vendor_init(&vmx_init_ops);
        if (r)
                return r;

        /*
         * Must be called after common x86 init so enable_ept is properly set
         * up.  Hand the parameter mitigation value in which was stored in
         * the pre module init parser.  If no parameter was given, it will
         * contain 'auto' which will be turned into the default 'cond'
         * mitigation mode.
         */
        r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
        if (r)
                goto err_l1d_flush;

        for_each_possible_cpu(cpu) {
                INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));

                pi_init_cpu(cpu);
        }

        cpu_emergency_register_virt_callback(vmx_emergency_disable);

        vmx_check_vmcs12_offsets();

        /*
         * Shadow paging doesn't have a (further) performance penalty
         * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
         * by default
         */
        if (!enable_ept)
                allow_smaller_maxphyaddr = true;

        /*
         * Common KVM initialization _must_ come last, after this, /dev/kvm is
         * exposed to userspace!
         */
        r = kvm_init(sizeof(struct vcpu_vmx), __alignof__(struct vcpu_vmx),
                     THIS_MODULE);
        if (r)
                goto err_kvm_init;

        return 0;

err_kvm_init:
        __vmx_exit();
err_l1d_flush:
        kvm_x86_vendor_exit();
        return r;
}
module_init(vmx_init);