static __always_inline
void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask)
{
- /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */
+ /* Use kvm_cpu_cap_init for leafs that aren't KVM-only. */
BUILD_BUG_ON(leaf < NCAPINTS);
kvm_cpu_caps[leaf] = mask;
}
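/*
 * For reference, a sketch of the masking helper that the non-KVM-only
 * initializer below relies on. This is an assumption based on the
 * surrounding file, not part of this patch; helpers such as
 * x86_feature_cpuid() and __cpuid_entry_get_reg() are taken as given.
 * The key point: hardware-defined leafs are ANDed with the raw host CPUID,
 * which is exactly the step that KVM-only leafs must skip.
 */
static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}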
-static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+static __always_inline void kvm_cpu_cap_init(enum cpuid_leafs leaf, u32 mask)
{
/* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */
BUILD_BUG_ON(leaf >= NCAPINTS);
kvm_cpu_caps[leaf] = mask;
__kvm_cpu_cap_mask(leaf);
}

void kvm_set_cpu_caps(void)
{
memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
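/*
 * Note (assumed semantics, not stated in the patch): the memcpy seeds only
 * the hardware-defined words of kvm_cpu_caps from the boot CPU's
 * capabilities; the trailing NKVMCAPINTS KVM-only words are excluded and
 * therefore start out as zero until initialized below.
 */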
- kvm_cpu_cap_mask(CPUID_1_ECX,
+ kvm_cpu_cap_init(CPUID_1_ECX,
F(XMM3) |
F(PCLMULQDQ) |
0 /* DTES64 */ |
/* KVM emulates x2apic in software irrespective of host support. */
kvm_cpu_cap_set(X86_FEATURE_X2APIC);
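/*
 * Illustration (assumed, not from this patch): kvm_cpu_cap_set() ORs the
 * feature's bit into the relevant word without consulting host CPUID, so
 * the call above amounts to roughly:
 *
 *	kvm_cpu_caps[CPUID_1_ECX] |= feature_bit(X2APIC);
 */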
- kvm_cpu_cap_mask(CPUID_1_EDX,
+ kvm_cpu_cap_init(CPUID_1_EDX,
F(FPU) |
F(VME) |
F(DE) |
0 /* HTT, TM, Reserved, PBE */
);
- kvm_cpu_cap_mask(CPUID_7_0_EBX,
+ kvm_cpu_cap_init(CPUID_7_0_EBX,
F(FSGSBASE) |
F(SGX) |
F(BMI1) |
F(AVX512BW) |
F(AVX512VL));
- kvm_cpu_cap_mask(CPUID_7_ECX,
+ kvm_cpu_cap_init(CPUID_7_ECX,
F(AVX512VBMI) |
F(LA57) |
F(PKU) |
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
kvm_cpu_cap_clear(X86_FEATURE_PKU);
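/*
 * Assumed rationale: PKU isn't usable with shadow paging and requires the
 * host to have OSPKE enabled, so the bit is stripped here.
 * kvm_cpu_cap_clear() is taken to be the inverse of kvm_cpu_cap_set(),
 * i.e. roughly kvm_cpu_caps[CPUID_7_ECX] &= ~feature_bit(PKU).
 */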
- kvm_cpu_cap_mask(CPUID_7_EDX,
+ kvm_cpu_cap_init(CPUID_7_EDX,
F(AVX512_4VNNIW) |
F(AVX512_4FMAPS) |
F(SPEC_CTRL) |
if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);
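/*
 * Assumed rationale: the host may enumerate SSBD only via AMD's leaf
 * (AMD_SSBD) while this is the Intel-defined CPUID.7.EDX flavor; both map
 * to the same MSR_IA32_SPEC_CTRL control, so KVM cross-advertises the bit.
 */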
- kvm_cpu_cap_mask(CPUID_7_1_EAX,
+ kvm_cpu_cap_init(CPUID_7_1_EAX,
F(SHA512) |
F(SM3) |
F(SM4) |
F(LAM)
);
kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
F(MCDT_NO)
);
- kvm_cpu_cap_mask(CPUID_D_1_EAX,
+ kvm_cpu_cap_init(CPUID_D_1_EAX,
F(XSAVEOPT) |
F(XSAVEC) |
F(XGETBV1) |
F(XSAVES)
);
kvm_cpu_cap_init_kvm_defined(CPUID_24_0_EBX,
F(AVX10_512)
);
- kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
+ kvm_cpu_cap_init(CPUID_8000_0001_ECX,
F(LAHF_LM) |
F(CMP_LEGACY) |
0 /*SVM*/ |
0 /* PERFCTR_CORE */
);
- kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
+ kvm_cpu_cap_init(CPUID_8000_0001_EDX,
F(FPU) |
F(VME) |
F(DE) |
F(3DNOW)
);
kvm_cpu_cap_init_kvm_defined(CPUID_8000_0007_EDX,
SF(CONSTANT_TSC)
);
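/*
 * SF() is assumed to be this file's "scattered feature" wrapper, roughly
 * (boot_cpu_has(X86_FEATURE_<name>) ? F(<name>) : 0): CONSTANT_TSC is a
 * Linux-defined synthetic bit in a KVM-only leaf, so the only available
 * filter is whether the boot CPU reports it, not a raw-CPUID AND.
 */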
- kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
+ kvm_cpu_cap_init(CPUID_8000_0008_EBX,
F(CLZERO) |
F(XSAVEERPTR) |
F(WBNOINVD) |
/*
* Hide all SVM features by default, SVM will set the cap bits for
* features it emulates and/or exposes for L1.
*/
- kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
+ kvm_cpu_cap_init(CPUID_8000_000A_EDX, 0);
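/*
 * For example (a sketch assuming svm.c's svm_set_cpu_caps() of this era,
 * not part of this patch), the vendor module later turns supported bits
 * back on:
 *
 *	if (nested) {
 *		kvm_cpu_cap_set(X86_FEATURE_SVM);
 *		if (npt_enabled)
 *			kvm_cpu_cap_set(X86_FEATURE_NPT);
 *	}
 */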
- kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
+ kvm_cpu_cap_init(CPUID_8000_001F_EAX,
0 /* SME */ |
0 /* SEV */ |
0 /* VM_PAGE_FLUSH */ |
F(SME_COHERENT)
);
- kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
+ kvm_cpu_cap_init(CPUID_8000_0021_EAX,
F(NO_NESTED_DATA_BP) |
F(LFENCE_RDTSC) |
0 /* SmmPgCfgLock */ |
/*
* Synthesize "LFENCE is serializing" into the AMD-defined entry in KVM's
* supported CPUID if the feature is reported as supported by the
* kernel. LFENCE_RDTSC was a Linux-defined synthetic feature long
* before AMD joined the bandwagon, e.g. LFENCE is serializing on most
* CPUs that support SSE2. On CPUs that don't support AMD's leaf,
- * kvm_cpu_cap_mask() will unfortunately drop the flag due to ANDing
+ * kvm_cpu_cap_init() will unfortunately drop the flag due to ANDing
* the mask with the raw host CPUID, and reporting support in AMD's
* leaf can make it easier for userspace to detect the feature.
*/
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
kvm_cpu_cap_set(X86_FEATURE_LFENCE_RDTSC);
kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE);
kvm_cpu_cap_set(X86_FEATURE_NO_SMM_CTL_MSR);
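/*
 * Assumed rationale for the two sets above: NULL_SEL_CLR_BASE advertises
 * that loading a NULL segment selector clears the segment base, and
 * NO_SMM_CTL_MSR advertises the absence of the SMM_CTL MSR, which KVM does
 * not emulate; both are synthesized via kvm_cpu_cap_set() rather than taken
 * from the leaf 0x80000021 mask above.
 */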
- kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
+ kvm_cpu_cap_init(CPUID_C000_0001_EDX,
F(XSTORE) |
F(XSTORE_EN) |
F(XCRYPT) |