// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "linux/lockdep.h"
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include <asm/sgx.h>
#include <asm/cpuid.h>

#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"
#include "xen.h"
/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;

			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			/* ECX[1]: 64B alignment in compacted form */
			if (compacted)
				offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
			else
				offset = ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}
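/*
 * A worked example of the sizing above, assuming a CPU whose
 * CPUID.(0xD, 2) reports the AVX (YMM) component as eax = 256 bytes at
 * ebx = offset 576: starting from ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET
 * = 576, the non-compacted walk takes offset = ebx and returns
 * max(576, 576 + 256) = 832.  In the compacted case the offset is instead
 * the running total, aligned up to 64 bytes iff ECX[1] is set for that
 * component.
 */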
#define F feature_bit

/* Scattered Flag - For features that are scattered by cpufeatures.h. */
#define SF(name)						\
({								\
	BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES);	\
	(boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0);	\
})
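/*
 * Rough sketch of the expansions: F(XSAVE) evaluates unconditionally to
 * X86_FEATURE_XSAVE's bit within its 32-bit CPUID word, whereas
 * SF(CONSTANT_TSC) folds in boot_cpu_has() so that a scattered feature
 * the kernel hasn't recorded for the boot CPU is never advertised merely
 * because the raw CPUID word layout could carry it.
 */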
/*
 * Magic value used by KVM when querying userspace-provided CPUID entries and
 * doesn't care about the CPUID index because the index of the function in
 * question is not significant.  Note, this magic value must have at least one
 * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find()
 * to avoid false positives when processing guest CPUID input.
 */
#define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull
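/*
 * Example of why the u64 consumption matters: a guest executing CPUID
 * with ECX = 0xffffffff reaches cpuid_entry2_find() with index =
 * 0x00000000ffffffffull after the u32->u64 widening, which can never
 * compare equal to the magic value 0xffffffffffffffffull, so guest input
 * cannot masquerade as an "index is not significant" lookup.
 */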
static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
	struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	/*
	 * KVM has a semi-arbitrary rule that querying the guest's CPUID model
	 * with IRQs disabled is disallowed.  The CPUID model can legitimately
	 * have over one hundred entries, i.e. the lookup is slow, and IRQs are
	 * typically disabled in KVM only when KVM is in a performance critical
	 * path, e.g. the core VM-Enter/VM-Exit run loop.  Nothing will break
	 * if this rule is violated, this assertion is purely to flag potential
	 * performance issues.  If this fires, consider moving the lookup out
	 * of the hotpath, e.g. by caching information during CPUID updates.
	 */
	lockdep_assert_irqs_enabled();

	for (i = 0; i < nent; i++) {
		e = &entries[i];

		if (e->function != function)
			continue;

		/*
		 * If the index isn't significant, use the first entry with a
		 * matching function.  It's userspace's responsibility to not
		 * provide "duplicate" entries in all cases.
		 */
		if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index)
			return e;

		/*
		 * Similarly, use the first matching entry if KVM is doing a
		 * lookup (as opposed to emulating CPUID) for a function that's
		 * architecturally defined as not having a significant index.
		 */
		if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) {
			/*
			 * Direct lookups from KVM should not diverge from what
			 * KVM defines internally (the architectural behavior).
			 */
			WARN_ON_ONCE(cpuid_function_is_indexed(function));
			return e;
		}
	}

	return NULL;
}
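/*
 * E.g. given a model containing entries 0x4.0, 0x4.1 and 0x4.2 (all
 * flagged with KVM_CPUID_FLAG_SIGNIFCANT_INDEX), a lookup of (0x4, 2)
 * returns the 0x4.2 entry, while a lookup of
 * (0x4, KVM_CPUID_INDEX_NOT_SIGNIFICANT) returns the first 0x4 entry and
 * fires the WARN above, as leaf 0x4 is architecturally indexed.
 */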
static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
			   struct kvm_cpuid_entry2 *entries,
			   int nent)
{
	struct kvm_cpuid_entry2 *best;
	u64 xfeatures;

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = cpuid_entry2_find(entries, nent, 0x80000008,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	/*
	 * Exposing dynamic xfeatures to the guest requires additional
	 * enabling in the FPU, e.g. to expand the guest XSAVE state size.
	 */
	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	xfeatures = best->eax | ((u64)best->edx << 32);
	xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
	if (!xfeatures)
		return 0;

	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
}
/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
				 int nent)
{
	struct kvm_cpuid_entry2 *orig;
	int i;

	if (nent != vcpu->arch.cpuid_nent)
		return -EINVAL;

	for (i = 0; i < nent; i++) {
		orig = &vcpu->arch.cpuid_entries[i];
		if (e2[i].function != orig->function ||
		    e2[i].index != orig->index ||
		    e2[i].flags != orig->flags ||
		    e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
		    e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
			return -EINVAL;
	}

	return 0;
}
static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
							    const char *sig)
{
	struct kvm_hypervisor_cpuid cpuid = {};
	struct kvm_cpuid_entry2 *entry;
	u32 base;

	for_each_possible_hypervisor_cpuid_base(base) {
		entry = kvm_find_cpuid_entry(vcpu, base);

		if (entry) {
			u32 signature[3];

			signature[0] = entry->ebx;
			signature[1] = entry->ecx;
			signature[2] = entry->edx;

			if (!memcmp(signature, sig, sizeof(signature))) {
				cpuid.base = base;
				cpuid.limit = entry->eax;
				break;
			}
		}
	}

	return cpuid;
}
static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
							      struct kvm_cpuid_entry2 *entries,
							      int nent)
{
	u32 base = vcpu->arch.kvm_cpuid.base;

	if (!base)
		return NULL;

	return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
				 KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
	return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
					     vcpu->arch.cpuid_nent);
}
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);

	/*
	 * Save the feature bitmap to avoid a CPUID lookup for every PV
	 * operation.
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;
}
/*
 * Calculate guest's supported XCR0 taking into account guest CPUID data and
 * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
 */
static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 0xd, 0);
	if (!best)
		return 0;

	return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}
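/*
 * E.g. if the guest's 0xd.0 entry enumerates EDX:EAX = 0x2e7 (x87, SSE,
 * AVX, AVX-512 and PKRU state) but kvm_caps.supported_xcr0 is 0x7, the
 * result is 0x7: guest CPUID can shrink, but never grow, the set of
 * xfeatures KVM itself supports.
 */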
static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
				       int nent)
{
	struct kvm_cpuid_entry2 *best;

	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = cpuid_entry2_find(entries, nent, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE));

	best = cpuid_entry2_find(entries, nent, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = cpuid_entry2_find(entries, nent, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	__kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
{
	struct kvm_cpuid_entry2 *entry;

	entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
				  KVM_CPUID_INDEX_NOT_SIGNIFICANT);
	return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
}
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;
	bool allow_gbpages;

	BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
	bitmap_zero(vcpu->arch.governed_features.enabled,
		    KVM_MAX_NR_GOVERNED_FEATURES);

	/*
	 * If TDP is enabled, let the guest use GBPAGES if they're supported in
	 * hardware.  The hardware page walker doesn't let KVM disable GBPAGES,
	 * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
	 * walk for performance and complexity reasons.  Not to mention KVM
	 * _can't_ solve the problem because GVA->GPA walks aren't visible to
	 * KVM once a TDP translation is installed.  Mimic hardware behavior so
	 * that KVM's behavior is at least consistent, i.e. doesn't randomly
	 * inject #PF.  If TDP is disabled, honor *only* guest CPUID as KVM
	 * has full control and can install smaller shadow pages if the host
	 * lacks 1GiB support.
	 */
	allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
				      guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
	if (allow_gbpages)
		kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);

	best = kvm_find_cpuid_entry(vcpu, 1);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	vcpu->arch.guest_supported_xcr0 =
		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);

	/*
	 * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
	 * supported by the host.
	 */
	vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
						       XFEATURE_MASK_FPSSE;

	kvm_update_pv_runtime(vcpu);

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);

	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
						    vcpu->arch.cpuid_nent));

	/* Invoke the vendor callback only after the above state is updated. */
	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);

	/*
	 * Except for the MMU, which must be updated after any vendor-specific
	 * adjustments to the reserved GPA bits have been made.
	 */
	kvm_mmu_after_set_cpuid(vcpu);
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
/*
 * This "raw" version returns the reserved GPA bits without any adjustments for
 * encryption technologies that usurp bits.  The raw mask should be used if and
 * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs.
 */
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
{
	return rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
}
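/*
 * E.g. for a vCPU model with MAXPHYADDR = 48, rsvd_bits(48, 63) evaluates
 * to 0xffff000000000000ull, i.e. a GPA with any of bits 63:48 set is
 * reserved for that vCPU.
 */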
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
			 int nent)
{
	int r;

	__kvm_update_cpuid_runtime(vcpu, e2, nent);

	/*
	 * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
	 * tracked in kvm_mmu_page_role.  As a result, KVM may miss guest page
	 * faults due to reusing SPs/SPTEs.  In practice no sane VMM mucks with
	 * the core vCPU model on the fly.  It would've been better to forbid
	 * any KVM_SET_CPUID{,2} calls after KVM_RUN altogether, but
	 * unfortunately some VMMs (e.g. QEMU) reuse vCPU fds for CPU
	 * hotplug/unplug and do KVM_SET_CPUID{,2} again.  To support this
	 * legacy behavior, check whether the supplied CPUID data is equal to
	 * what's already set.
	 */
	if (kvm_vcpu_has_run(vcpu)) {
		r = kvm_cpuid_check_equal(vcpu, e2, nent);
		if (r)
			return r;

		kvfree(e2);
		return 0;
	}

	if (kvm_cpuid_has_hyperv(e2, nent)) {
		r = kvm_hv_vcpu_init(vcpu);
		if (r)
			return r;
	}

	r = kvm_check_cpuid(vcpu, e2, nent);
	if (r)
		return r;

	kvfree(vcpu->arch.cpuid_entries);
	vcpu->arch.cpuid_entries = e2;
	vcpu->arch.cpuid_nent = nent;

	vcpu->arch.kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE);
	vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE);
	kvm_vcpu_after_set_cpuid(vcpu);

	return 0;
}
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *e = NULL;
	struct kvm_cpuid_entry2 *e2 = NULL;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e = vmemdup_user(entries, array_size(sizeof(*e), cpuid->nent));
		if (IS_ERR(e))
			return PTR_ERR(e);

		e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT);
		if (!e2) {
			r = -ENOMEM;
			goto out_free_cpuid;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		e2[i].function = e[i].function;
		e2[i].eax = e[i].eax;
		e2[i].ebx = e[i].ebx;
		e2[i].ecx = e[i].ecx;
		e2[i].edx = e[i].edx;
		e2[i].index = 0;
		e2[i].flags = 0;
		e2[i].padding[0] = 0;
		e2[i].padding[1] = 0;
		e2[i].padding[2] = 0;
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

out_free_cpuid:
	kvfree(e);

	return r;
}
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *e2 = NULL;
	int r;

	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		return -E2BIG;

	if (cpuid->nent) {
		e2 = vmemdup_user(entries, array_size(sizeof(*e2), cpuid->nent));
		if (IS_ERR(e2))
			return PTR_ERR(e2);
	}

	r = kvm_set_cpuid(vcpu, e2, cpuid->nent);
	if (r)
		kvfree(e2);

	return r;
}
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		return -E2BIG;

	if (copy_to_user(entries, vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	cpuid->nent = vcpu->arch.cpuid_nent;
	return 0;
}
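/*
 * A minimal userspace sketch of the read-back path (hypothetical VMM
 * code, not part of KVM):
 *
 *	struct kvm_cpuid2 *c = calloc(1, sizeof(*c) +
 *		KVM_MAX_CPUID_ENTRIES * sizeof(struct kvm_cpuid_entry2));
 *	c->nent = KVM_MAX_CPUID_ENTRIES;
 *	if (!ioctl(vcpu_fd, KVM_GET_CPUID2, c))
 *		process(c->entries, c->nent); // nent now holds the real count
 *
 * Sizing the buffer at KVM_MAX_CPUID_ENTRIES sidesteps the -E2BIG path
 * above, as the set side caps the model at that many entries.
 */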
/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}
static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
	BUILD_BUG_ON(leaf < NCAPINTS);

	kvm_cpu_caps[leaf] = mask;

	__kvm_cpu_cap_mask(leaf);
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	/* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
	BUILD_BUG_ON(leaf >= NCAPINTS);

	kvm_cpu_caps[leaf] &= mask;

	__kvm_cpu_cap_mask(leaf);
}
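/*
 * Illustrative use of the pair (mirroring kvm_set_cpu_caps() below):
 *
 *	kvm_cpu_cap_mask(CPUID_1_ECX, F(XSAVE) | F(AVX));
 *
 * starts from the kernel's view of the hardware-defined word and ANDs in
 * both KVM's allowlist and the raw host CPUID, whereas
 * kvm_cpu_cap_init_kvm_defined() seeds a KVM-only word from zero with the
 * allowlist before applying the same raw CPUID mask.
 */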
void kvm_set_cpu_caps(void)
{
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
	unsigned int f_xfd = F(XFD);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
	unsigned int f_xfd = 0;
#endif
	memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);

	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);
	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);
	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
		F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
		F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
		F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
		F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
		F(AVX512VL));
	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
		F(SGX_LC) | F(BUS_LOCK_DETECT)
	);

	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);
	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host.  Clear it if that is not the case.
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);
	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
		F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16) | F(FLUSH_L1D)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) |
		F(FZRM) | F(FSRS) | F(FSRC) |
		F(AMX_FP16) | F(AVX_IFMA)
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
		F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
		F(AMX_COMPLEX)
	);
	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
	);

	kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX,
		SF(SGX1) | SF(SGX2) | SF(SGX_EDECCSSA)
	);
	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | 0 /* PERFCTR_CORE */
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);
	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
		SF(CONSTANT_TSC)
	);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) |
		F(AMD_PSFD)
	);
	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);

	/*
	 * The preference is to use SPEC CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
	kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
		0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
		F(SME_COHERENT));

	kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
		F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
		F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
	);

	if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
		kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
	kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
		F(PERFMON_V2)
	);

	/*
	 * Synthesize "LFENCE is serializing" into the AMD-defined entry in
	 * KVM's supported CPUID if the feature is reported as supported by the
	 * kernel.  LFENCE_RDTSC was a Linux-defined synthetic feature long
	 * before AMD joined the bandwagon, e.g. LFENCE is serializing on most
	 * CPUs that support SSE2.  On CPUs that don't support AMD's leaf,
	 * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
	 * the mask with the raw host CPUID, and reporting support in AMD's
	 * leaf can make it easier for userspace to detect the feature.
	 */
	if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
		kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
	if (!static_cpu_has_bug(X86_BUG_NULL_SEG))
		kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
	kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);
	/*
	 * Hide RDTSCP and RDPID if either feature is reported as supported but
	 * probing MSR_TSC_AUX failed.  This is purely a sanity check and
	 * should never happen, but the guest will likely crash if RDTSCP or
	 * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
	 * the past.  For example, the sanity check may fire if this instance of
	 * KVM is running as L1 on top of an older, broken KVM.
	 */
	if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
		     kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
		     !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};
static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
{
	if (array->nent >= array->maxnent)
		return NULL;

	return &array->entries[array->nent++];
}
static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);

	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(*entry));
	entry->function = function;
	entry->index = index;

	switch (function & 0xC0000000) {
	case 0x40000000:
		/* Hypervisor leaves are always synthesized by __do_cpuid_func. */
		return entry;

	case 0x80000000:
		/*
		 * 0x80000021 is sometimes synthesized by __do_cpuid_func, which
		 * would result in out-of-bounds calls to do_host_cpuid.
		 */
		{
			static int max_cpuid_80000000;

			if (!READ_ONCE(max_cpuid_80000000))
				WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000));
			if (function > READ_ONCE(max_cpuid_80000000))
				return entry;
		}
		break;

	default:
		break;
	}

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	if (cpuid_function_is_indexed(function))
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;

	return entry;
}
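/*
 * E.g. do_host_cpuid(array, 0x8000001d, 1) snapshots the host's second
 * cache-topology subleaf and, because 0x8000001d is an indexed function,
 * tags the entry with KVM_CPUID_FLAG_SIGNIFCANT_INDEX so that subsequent
 * lookups must also match on the index.
 */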
static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
			entry->ecx = F(RDPID);
		++array->nent;
		break;
	default:
		break;
	}

	return 0;
}
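/*
 * E.g. KVM_GET_EMULATED_CPUID advertises MOVBE here, which the
 * instruction emulator implements regardless of host support, and RDPID,
 * which KVM can emulate via RDTSCP's MSR_TSC_AUX; both are deliberately
 * absent from KVM_GET_SUPPORTED_CPUID on hosts lacking the real features.
 */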
static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on, are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			cpuid_entry_override(entry, CPUID_7_1_EDX);
			entry->ebx = 0;
			entry->ecx = 0;
		}
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		eax.split.version_id = kvm_pmu_cap.version;
		eax.split.num_counters = kvm_pmu_cap.num_counters_gp;
		eax.split.bit_width = kvm_pmu_cap.bit_width_gp;
		eax.split.mask_length = kvm_pmu_cap.events_mask_len;
		edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed;
		edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed;

		if (kvm_pmu_cap.version)
			edx.split.anythread_deprecated = 1;
		edx.split.reserved1 = 0;
		edx.split.reserved2 = 0;

		entry->eax = eax.full;
		entry->ebx = kvm_pmu_cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	case 0x1f:
	case 0xb:
		/*
		 * No topology; a valid topology is indicated by the presence
		 * of subleaf 1.
		 */
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0xd: {
		u64 permitted_xcr0 = kvm_get_filtered_xcr0();
		u64 permitted_xss = kvm_caps.supported_xss;

		entry->eax &= permitted_xcr0;
		entry->ebx = xstate_required_size(permitted_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= permitted_xcr0 >> 32;
		if (!permitted_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
			entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
							  true);
		else {
			WARN_ON_ONCE(permitted_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= permitted_xss;
		entry->edx &= permitted_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;

			if (permitted_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (permitted_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with permitted_xcr0/permitted_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}

			if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
				entry->ecx &= ~BIT_ULL(2);
			entry->edx = 0;
		}
		break;
	}
	case 0x12:
		/* Intel SGX */
		if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		/*
		 * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
		 * and max enclave sizes.  The SGX sub-features and MISCSELECT
		 * are restricted by kernel and KVM capabilities (like most
		 * feature flags), while enclave size is unrestricted.
		 */
		cpuid_entry_override(entry, CPUID_12_EAX);
		entry->ebx &= SGX_MISC_EXINFO;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		/*
		 * Index 1: SECS.ATTRIBUTES.  ATTRIBUTES are restricted a la
		 * feature flags.  Advertise all supported flags, including
		 * privileged attributes that require explicit opt-in from
		 * userspace.  ATTRIBUTES.XFRM is not adjusted as userspace is
		 * expected to derive it from supported XCR0.
		 */
		entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK;
		entry->ebx &= 0;
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	/* Intel AMX TILE */
	case 0x1d:
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case 0x1e: /* TMUL information */
		if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		const u32 *sigptr = (const u32 *)KVM_SIGNATURE;

		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x80000022);
		/*
		 * Serializing LFENCE is reported in a multitude of ways, and
		 * NullSegClearsBase is not reported in CPUID on Zen2; help
		 * userspace by providing the CPUID leaf ourselves.
		 *
		 * However, only do it if the host has CPUID leaf 0x8000001d.
		 * QEMU thinks that it can query the host blindly for that
		 * CPUID leaf if KVM reports that it supports 0x8000001d or
		 * above.  The processor merrily returns values from the
		 * highest Intel leaf which QEMU tries to use as the guest's
		 * 0x8000001d.  Even worse, this can result in an infinite
		 * loop if said highest leaf has no subleaves indexed by ECX.
		 */
		if (entry->eax >= 0x8000001d &&
		    (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
		     || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
			entry->eax = max(entry->eax, 0x80000021);
		break;
	case 0x80000001:
		entry->ebx &= ~GENMASK(27, 16);
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000005:
		/* Pass host L1 cache and TLB info. */
		break;
	case 0x80000006:
		/* Drop reserved bits, pass host L2 cache and TLB info. */
		entry->edx &= ~GENMASK(17, 16);
		break;
	case 0x80000007: /* Advanced power management */
		cpuid_entry_override(entry, CPUID_8000_0007_EDX);

		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		/*
		 * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
		 * the guest operates in the same PA space as the host, i.e.
		 * reductions in MAXPHYADDR for memory encryption affect shadow
		 * paging, too.
		 *
		 * If TDP is enabled but an explicit guest MAXPHYADDR is not
		 * provided, use the raw bare metal MAXPHYADDR as reductions to
		 * the HPAs do not affect GPAs.
		 */
		if (!tdp_enabled)
			g_phys_as = boot_cpu_data.x86_phys_bits;
		else if (!g_phys_as)
			g_phys_as = phys_as;

		entry->eax = g_phys_as | (virt_as << 8);
		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		entry->eax &= GENMASK(2, 0);
		entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x8000001e:
		/* Do not return host topology information. */
		entry->eax = entry->ebx = entry->ecx = 0;
		entry->edx = 0; /* reserved */
		break;
	case 0x8000001F:
		if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		} else {
			cpuid_entry_override(entry, CPUID_8000_001F_EAX);
			/* Clear NumVMPL since KVM does not support VMPL. */
			entry->ebx &= ~GENMASK(31, 12);
			/*
			 * Enumerate '0' for "PA bits reduction", the adjusted
			 * MAXPHYADDR is enumerated directly (see 0x80000008).
			 */
			entry->ebx &= ~GENMASK(11, 6);
		}
		break;
	case 0x80000020:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x80000021:
		entry->ebx = entry->ecx = entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0021_EAX);
		break;
	/* AMD Extended Performance Monitoring and Debug */
	case 0x80000022: {
		union cpuid_0x80000022_ebx ebx;

		entry->ecx = entry->edx = 0;
		if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
			entry->eax = entry->ebx;
			break;
		}

		cpuid_entry_override(entry, CPUID_8000_0022_EAX);

		if (kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
			ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp;
		else if (kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS_CORE;
		else
			ebx.split.num_core_pmc = AMD64_NUM_COUNTERS;

		entry->ebx = ebx.full;
		break;
	}
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 for now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}
static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000
static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID, so we have
	 * to settle for enforcing it only on the emulated side.  /me cries.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}

	return false;
}
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL);
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	kvfree(array.entries);
	return r;
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu,
						    u32 function, u32 index)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, index);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index);

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function)
{
	return cpuid_entry2_find(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent,
				 function, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are three primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry_index(vcpu, basic->eax, index);
}
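/*
 * Worked example: with a guest model whose max basic leaf is 0xd and an
 * Intel guest vendor, a query of CPUID.0x16.2 finds the Basic class
 * (leaf 0), sees 0x16 > 0xd, and is redirected to CPUID.0xd.2, i.e. the
 * max basic leaf with the original index preserved.
 */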
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry_index(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;

			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		} else if (function == 0x80000007) {
			if (kvm_hv_invtsc_suppressed(vcpu))
				*edx &= ~SF(CONSTANT_TSC);
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry_index(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);
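/*
 * E.g. emulating a guest-executed CPUID passes exact_only=false so the
 * out-of-range redirection above applies, whereas callers that need to
 * know whether a leaf actually exists in the model pass exact_only=true
 * and receive zeros plus a false return on a miss.
 */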
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);