+++ /dev/null
-From 6df764c77bf53fde074bac365d16ca7b0f47a889 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 18 Feb 2025 12:13:33 +0100
-Subject: x86/bugs: KVM: Add support for SRSO_MSR_FIX
-
-From: Borislav Petkov <bp@alien8.de>
-
-[ Upstream commit 8442df2b49ed9bcd67833ad4f091d15ac91efd00 ]
-
-Add support for
-
- CPUID Fn8000_0021_EAX[31] (SRSO_MSR_FIX). If this bit is 1, it
- indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate
- SRSO.
-
-Enable BpSpecReduce to mitigate SRSO across guest/host boundaries.
-
-Switch back to setting the bit when virtualization is enabled on a CPU and
-clearing it when virtualization is disabled, rather than using an MSR save
-slot: a save slot would clear the bit on every guest exit, so any training
-the guest has done could influence the host kernel once execution enters the
-kernel but has not yet VMRUN the guest again.
-
-More detail on the public thread in Link below.
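-
-A minimal user-space sketch of checking for the bit described above
-(illustrative only; it assumes GCC/clang's <cpuid.h> and is not taken from
-the kernel sources):
-
-  #include <cpuid.h>
-  #include <stdio.h>
-
-  int main(void)
-  {
-      unsigned int eax, ebx, ecx, edx;
-
-      /* Leaf 0x80000021 carries the extended feature bits quoted above. */
-      if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx)) {
-          puts("CPUID Fn8000_0021 not supported");
-          return 1;
-      }
-
-      /* EAX[31] set: BP_CFG[BpSpecReduce] may be used to mitigate SRSO. */
-      printf("SRSO_MSR_FIX: %s\n", (eax >> 31) & 1 ? "yes" : "no");
-      return 0;
-  }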
-
-Co-developed-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
-Link: https://lore.kernel.org/r/20241202120416.6054-1-bp@kernel.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- Documentation/admin-guide/hw-vuln/srso.rst | 13 ++++++++++++
- arch/x86/include/asm/cpufeatures.h | 4 ++++
- arch/x86/include/asm/msr-index.h | 1 +
- arch/x86/kernel/cpu/bugs.c | 24 ++++++++++++++++++----
- arch/x86/kvm/svm/svm.c | 6 ++++++
- arch/x86/lib/msr.c | 2 ++
- 6 files changed, 46 insertions(+), 4 deletions(-)
-
-diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
-index 2ad1c05b8c883..66af95251a3d1 100644
---- a/Documentation/admin-guide/hw-vuln/srso.rst
-+++ b/Documentation/admin-guide/hw-vuln/srso.rst
-@@ -104,7 +104,20 @@ The possible values in this file are:
-
- (spec_rstack_overflow=ibpb-vmexit)
-
-+ * 'Mitigation: Reduced Speculation':
-
-+   This mitigation is enabled automatically when the "IBPB on VMEXIT"
-+   mitigation above has been selected and the CPU supports the BpSpecReduce bit.
-+
-+   It is also enabled automatically on machines which have the
-+   SRSO_USER_KERNEL_NO=1 CPUID bit. In that case the logic switches to the
-+   =ibpb-vmexit mitigation above because the user/kernel boundary is no
-+   longer affected and thus "safe RET" is not needed.
-+
-+   Once the IBPB on VMEXIT mitigation option is enabled, the BpSpecReduce bit
-+   is detected (it is present on all such machines) and it effectively
-+   overrides IBPB on VMEXIT, as it has much less performance impact and also
-+   covers the guest->host attack vector.
-
- In order to exploit the vulnerability, an attacker needs to:
-
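-The mitigation string documented above is what the kernel reports through
-sysfs. A tiny sketch for reading it (assuming the usual sysfs path for this
-vulnerability; illustrative only):
-
-  #include <stdio.h>
-
-  int main(void)
-  {
-      char line[128];
-      FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow", "r");
-
-      if (!f) {
-          perror("fopen");
-          return 1;
-      }
-      /* Prints e.g. "Mitigation: Reduced Speculation" once BpSpecReduce is used. */
-      if (fgets(line, sizeof(line), f))
-          fputs(line, stdout);
-      fclose(f);
-      return 0;
-  }
-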
-diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
-index b8fbd847c34af..06631474ede29 100644
---- a/arch/x86/include/asm/cpufeatures.h
-+++ b/arch/x86/include/asm/cpufeatures.h
-@@ -468,6 +468,10 @@
- #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
- #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
- #define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
-+#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
-+ * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
-+ * (SRSO_MSR_FIX in the official doc).
-+ */
-
- /*
- * Extended auxiliary flags: Linux defined - for features scattered in various
-diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
-index d4308e78a009a..9f402a7b211b9 100644
---- a/arch/x86/include/asm/msr-index.h
-+++ b/arch/x86/include/asm/msr-index.h
-@@ -729,6 +729,7 @@
-
- /* Zen4 */
- #define MSR_ZEN4_BP_CFG 0xc001102e
-+#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
- #define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
-
- /* Fam 19h MSRs */
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index e0e0ecc401947..f1954147cc5d5 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -2675,6 +2675,7 @@ enum srso_mitigation {
- SRSO_MITIGATION_SAFE_RET,
- SRSO_MITIGATION_IBPB,
- SRSO_MITIGATION_IBPB_ON_VMEXIT,
-+ SRSO_MITIGATION_BP_SPEC_REDUCE,
- };
-
- enum srso_mitigation_cmd {
-@@ -2692,7 +2693,8 @@ static const char * const srso_strings[] = {
- [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
- [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
- [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
-- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
-+ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
-+ [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
- };
-
- static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
-@@ -2731,7 +2733,7 @@ static void __init srso_select_mitigation(void)
- srso_cmd == SRSO_CMD_OFF) {
- if (boot_cpu_has(X86_FEATURE_SBPB))
- x86_pred_cmd = PRED_CMD_SBPB;
-- return;
-+ goto out;
- }
-
- if (has_microcode) {
-@@ -2743,7 +2745,7 @@ static void __init srso_select_mitigation(void)
- */
- if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
- setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
-- return;
-+ goto out;
- }
-
- if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-@@ -2823,6 +2825,12 @@ static void __init srso_select_mitigation(void)
-
- ibpb_on_vmexit:
- case SRSO_CMD_IBPB_ON_VMEXIT:
-+ if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
-+ pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
-+ srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
-+ break;
-+ }
-+
- if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
- if (has_microcode) {
- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
-@@ -2844,7 +2852,15 @@ static void __init srso_select_mitigation(void)
- }
-
- out:
-- pr_info("%s\n", srso_strings[srso_mitigation]);
-+ /*
-+ * Clear the feature flag if this mitigation is not selected as that
-+ * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
-+ */
-+ if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
-+ setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
-+
-+ if (srso_mitigation != SRSO_MITIGATION_NONE)
-+ pr_info("%s\n", srso_strings[srso_mitigation]);
- }
-
- #undef pr_fmt
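-
-A condensed, standalone sketch of the selection order the hunks above
-implement for spec_rstack_overflow=ibpb-vmexit (the enum and helper below are
-made up for illustration and the fallback handling is simplified; this is not
-kernel code):
-
-  #include <stdbool.h>
-
-  enum srso_choice { IBPB_ON_VMEXIT, BP_SPEC_REDUCE, UCODE_NEEDED };
-
-  /* BpSpecReduce wins whenever the CPU advertises it: it is cheaper than
-   * IBPB on VMEXIT and also covers the guest->host attack vector. */
-  enum srso_choice pick(bool bp_spec_reduce, bool ibpb_entry, bool ucode)
-  {
-      if (bp_spec_reduce)
-          return BP_SPEC_REDUCE;
-      if (ibpb_entry && ucode)
-          return IBPB_ON_VMEXIT;
-      return UCODE_NEEDED;
-  }
-
-  int main(void)
-  {
-      /* With BpSpecReduce present, it is chosen regardless of the rest. */
-      return pick(true, false, false) == BP_SPEC_REDUCE ? 0 : 1;
-  }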
-diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 282c91c6aa338..b8f12b808d83a 100644
---- a/arch/x86/kvm/svm/svm.c
-+++ b/arch/x86/kvm/svm/svm.c
-@@ -607,6 +607,9 @@ static void svm_disable_virtualization_cpu(void)
- kvm_cpu_svm_disable();
-
- amd_pmu_disable_virt();
-+
-+ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-+ msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
- }
-
- static int svm_enable_virtualization_cpu(void)
-@@ -684,6 +687,9 @@ static int svm_enable_virtualization_cpu(void)
- rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
- }
-
-+ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
-+ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
-+
- return 0;
- }
-
-diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
-index 4bf4fad5b148e..5a18ecc04a6c3 100644
---- a/arch/x86/lib/msr.c
-+++ b/arch/x86/lib/msr.c
-@@ -103,6 +103,7 @@ int msr_set_bit(u32 msr, u8 bit)
- {
- return __flip_bit(msr, bit, true);
- }
-+EXPORT_SYMBOL_GPL(msr_set_bit);
-
- /**
- * msr_clear_bit - Clear @bit in a MSR @msr.
-@@ -118,6 +119,7 @@ int msr_clear_bit(u32 msr, u8 bit)
- {
- return __flip_bit(msr, bit, false);
- }
-+EXPORT_SYMBOL_GPL(msr_clear_bit);
-
- #ifdef CONFIG_TRACEPOINTS
- void do_trace_write_msr(unsigned int msr, u64 val, int failed)
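-
-msr_set_bit()/msr_clear_bit() do a read-modify-write of a single MSR bit from
-kernel context; the exports above let KVM use them for BpSpecReduce. From
-user space the bit can only be inspected, e.g. through the msr module's
-/dev/cpu/N/msr interface (a rough sketch; it assumes the msr module is loaded
-and root privileges, and it only reads the bit):
-
-  #include <fcntl.h>
-  #include <stdint.h>
-  #include <stdio.h>
-  #include <unistd.h>
-
-  #define MSR_ZEN4_BP_CFG       0xc001102e
-  #define BP_SPEC_REDUCE_BIT    4
-
-  int main(void)
-  {
-      uint64_t val;
-      int fd = open("/dev/cpu/0/msr", O_RDONLY);
-
-      if (fd < 0) {
-          perror("open /dev/cpu/0/msr");
-          return 1;
-      }
-      /* The msr device returns 8 bytes at file offset == MSR number. */
-      if (pread(fd, &val, sizeof(val), MSR_ZEN4_BP_CFG) != sizeof(val)) {
-          perror("pread");
-          close(fd);
-          return 1;
-      }
-      printf("BpSpecReduce on CPU0: %llu\n",
-             (unsigned long long)((val >> BP_SPEC_REDUCE_BIT) & 1));
-      close(fd);
-      return 0;
-  }
-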
---
-2.39.5
-