CPUID_8000_001F_EAX,
CPUID_8000_0021_EAX,
CPUID_LNX_5,
+ CPUID_8000_0021_ECX,
NR_CPUID_WORDS,
};
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 22, feature_bit) || \
REQUIRED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 23))
#define DISABLED_MASK_BIT_SET(feature_bit) \
( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \
CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 22, feature_bit) || \
DISABLED_MASK_CHECK || \
- BUILD_BUG_ON_ZERO(NCAPINTS != 22))
+ BUILD_BUG_ON_ZERO(NCAPINTS != 23))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 22 /* N 32-bit words worth of info */
+#define NCAPINTS 23 /* N 32-bit words worth of info */
#define NBUGINTS 2 /* N 32-bit bug flags */
/*
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
-#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
-#define X86_FEATURE_TSA_L1_NO (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
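+/* AMD-defined features, CPUID level 0x80000021 (ECX), word 22 */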
+#define X86_FEATURE_TSA_SQ_NO (22*32+ 1) /* "" AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO (22*32+ 2) /* "" AMD CPU not vulnerable to TSA-L1 */
/*
* BUG word(s)
#define DISABLED_MASK19 0
#define DISABLED_MASK20 0
#define DISABLED_MASK21 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+#define DISABLED_MASK22 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
#define REQUIRED_MASK19 0
#define REQUIRED_MASK20 0
#define REQUIRED_MASK21 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
+#define REQUIRED_MASK22 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
- { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
- { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
{ 0, 0, 0, 0, 0 }
};
*/
kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
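+
+ /*
+  * Propagate the host's TSA-related feature bits into KVM's CPU caps.
+  * TSA_SQ_NO and TSA_L1_NO reach guests through the
+  * CPUID_8000_0021_ECX override in __do_cpuid_func().
+  */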
+ if (cpu_feature_enabled(X86_FEATURE_VERW_CLEAR))
+ kvm_cpu_cap_set(X86_FEATURE_VERW_CLEAR);
+
+ if (cpu_feature_enabled(X86_FEATURE_TSA_SQ_NO))
+ kvm_cpu_cap_set(X86_FEATURE_TSA_SQ_NO);
+
+ if (cpu_feature_enabled(X86_FEATURE_TSA_L1_NO))
+ kvm_cpu_cap_set(X86_FEATURE_TSA_L1_NO);
+
kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x80000021:
- entry->ebx = entry->ecx = entry->edx = 0;
+ entry->ebx = entry->edx = 0;
/*
* Pass down these bits:
* EAX 0 NNDBP, Processor ignores nested data breakpoints
* EAX 2 LAS, LFENCE always serializing
+ * EAX 5 VERW_CLEAR, mitigate TSA
* EAX 6 NSCB, Null selector clear base
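+ * ECX 1 TSA_SQ_NO, AMD CPU not vulnerable to TSA-SQ
+ * ECX 2 TSA_L1_NO, AMD CPU not vulnerable to TSA-L1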
*
* Other defined bits are for MSRs that KVM does not expose:
* EAX 3 SPCL, SMM page configuration lock
* EAX 13 PCMSR, Prefetch control MSR
*/
- entry->eax &= BIT(0) | BIT(2) | BIT(6);
+ entry->eax &= BIT(0) | BIT(2) | BIT(5) | BIT(6);
+ cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
[CPUID_7_EDX] = { 7, 0, CPUID_EDX},
[CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
+ [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
};
/*