/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/fpu/xstate.h>
#include <asm/mce.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

struct kvm_caps {
	/* control of guest tsc rate supported? */
	bool has_tsc_control;
	/* maximum supported tsc_khz for guests */
	u32 max_guest_tsc_khz;
	/* number of bits of the fractional part of the TSC scaling ratio */
	u8 tsc_scaling_ratio_frac_bits;
	/* maximum allowed value of TSC scaling ratio */
	u64 max_tsc_scaling_ratio;
	/* 1ull << kvm_caps.tsc_scaling_ratio_frac_bits */
	u64 default_tsc_scaling_ratio;
	/* bus lock detection supported? */
	bool has_bus_lock_exit;
	/* notify VM exit supported? */
	bool has_notify_vmexit;

	u64 supported_mce_cap;
	u64 supported_xcr0;
	u64 supported_xss;
	u64 supported_perf_cap;
};

void kvm_spurious_fault(void);

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)	\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

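/*
 * Illustrative use (hypothetical consistency check): wrapping an expression
 * traces the stringified check when it fails, e.g.
 *
 *	if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(!nested_cr3_valid(vcpu, cr3)))
 *		return -EINVAL;
 */
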
/*
 * The first...last VMX feature MSRs that are emulated by KVM.  This may or may
 * not cover all known VMX MSRs, as KVM doesn't emulate an MSR until there's an
 * associated feature that KVM supports for nested virtualization.
 */
#define KVM_FIRST_EMULATED_VMX_MSR	MSR_IA32_VMX_BASIC
#define KVM_LAST_EMULATED_VMX_MSR	MSR_IA32_VMX_VMFUNC

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

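/*
 * The grow/shrink helpers below treat a modifier smaller than the base as a
 * multiplier/divisor and otherwise as an additive step; SVM's window max is
 * USHRT_MAX because the VMCB's pause filter count field is 16 bits wide.
 */
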
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

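/*
 * Power-on default PAT: each byte is one PAT entry, so PA0-PA3 (mirrored in
 * PA4-PA7) are WB (6), WT (4), UC- (7) and UC (0), the architectural reset
 * value.
 */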
#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);

static inline bool kvm_vcpu_has_run(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.last_vmentry_cpu != -1;
}

static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending ||
	       vcpu->arch.exception_vmexit.pending ||
	       kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
	vcpu->arch.exception_vmexit.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

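/* #BP is generated by INT3 and #OF by INTO, i.e. by software instructions. */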
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr0_bit_set(vcpu, X86_CR0_PE);
}

static inline bool is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return !!(vcpu->arch.efer & EFER_LMA);
#else
	return false;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	WARN_ON_ONCE(vcpu->arch.guest_state_protected);

	if (!is_long_mode(vcpu))
		return false;
	static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_64_bit_hypercall(struct kvm_vcpu *vcpu)
{
	/*
	 * If running with protected guest state, the CS register is not
	 * accessible.  The hypercall register values must have been provided
	 * in 64-bit mode, so assume the guest is in 64-bit mode.
	 */
	return vcpu->arch.guest_state_protected || is_64_bit_mode(vcpu);
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline bool is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PAE);
}

static inline bool is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PSE);
}

static inline bool is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_is_cr0_bit_set(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

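/*
 * With CR4.LA57 set the guest uses 5-level paging, i.e. 57-bit virtual
 * addresses; otherwise 4-level paging yields 48 significant bits.
 */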
static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return !__is_canonical_address(la, vcpu_virt_addr_bits(vcpu));
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva.  If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write_raw(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
				   struct kvm_queued_exception *ex);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 host_xss;
extern u64 host_arch_capabilities;

extern struct kvm_caps kvm_caps;

extern bool enable_pmu;

/*
 * Get a filtered version of KVM's supported XCR0 that strips out dynamic
 * features for which the current process doesn't (yet) have permission to use.
 * This is intended to be used only when enumerating support to userspace,
 * e.g. in KVM_GET_SUPPORTED_CPUID and KVM_CAP_XSAVE2, it does NOT need to be
 * used to check/restrict guest behavior as KVM rejects KVM_SET_CPUID{2} if
 * userspace attempts to enable unpermitted features.
 */
static inline u64 kvm_get_filtered_xcr0(void)
{
	u64 permitted_xcr0 = kvm_caps.supported_xcr0;

	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	if (permitted_xcr0 & XFEATURE_MASK_USER_DYNAMIC) {
		permitted_xcr0 &= xstate_get_guest_group_perm();

		/*
		 * Treat XTILE_CFG as unsupported if the current process isn't
		 * allowed to use XTILE_DATA, as attempting to set XTILE_CFG in
		 * XCR0 without setting XTILE_DATA is architecturally illegal.
		 */
		if (!(permitted_xcr0 & XFEATURE_MASK_XTILE_DATA))
			permitted_xcr0 &= ~XFEATURE_MASK_XTILE_CFG;
	}
	return permitted_xcr0;
}

static inline bool kvm_mpx_supported(void)
{
	return (kvm_caps.supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern bool report_ignored_msrs;

extern bool eager_page_split;

static inline void kvm_pr_unimpl_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled WRMSR(0x%x) = 0x%llx\n", msr, data);
}

static inline void kvm_pr_unimpl_rdmsr(struct kvm_vcpu *vcpu, u32 msr)
{
	if (report_ignored_msrs)
		vcpu_unimpl(vcpu, "Unhandled RDMSR(0x%x)\n", msr);
}

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			: "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
{
	return kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_ENABLED;
}

enum kvm_intr_type {
	/* Values are arbitrary, but must be non-zero. */
	KVM_HANDLING_IRQ = 1,
	KVM_HANDLING_NMI,
};

static __always_inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
						 enum kvm_intr_type intr)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}

static __always_inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

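/*
 * Worked example: memory type 2 is reserved; a PAT byte of 0x02 fails the
 * check above because (0x02 | (0x02 << 1)) = 0x06 != 0x02, while the valid
 * type 6 survives as (0x06 | (0x02 << 1)) = 0x06.
 */
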
static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

/*
 * Trigger machine check on the host.  We assume all the MSRs are already set up
 * by the CPU and that we still run on the same CPU as the MCE occurred on.
 * We pass a fake environment to the machine check handler because we want
 * the guest to be always treated like user space, no matter what context
 * it used internally.
 */
static inline void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE)
	struct pt_regs regs = {
		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
		.flags = X86_EFLAGS_IF,
	};

	do_machine_check(&regs);
#endif
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace handles it.
 * Note, '1', '0', and negative numbers are off limits, as they are used by KVM
 * internally.
 */
#define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	if (!__cpu_has(__c, X86_FEATURE_PCID))          \
		__reserved_bits |= X86_CR4_PCIDE;       \
	__reserved_bits;                                \
})

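/*
 * Sketch of intended use: instantiate with either a host feature check,
 * e.g. __cr4_reserved_bits(cpu_has, c), or a guest CPUID check,
 * e.g. __cr4_reserved_bits(guest_cpuid_has, vcpu).
 */
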
int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			  void *dst);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t src, unsigned int bytes,
			 void *dst);
int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
			 unsigned int port, void *data, unsigned int count,
			 int in);