x86/srso: Add SRSO_NO support
author      Borislav Petkov (AMD) <bp@alien8.de>
            Thu, 29 Jun 2023 15:43:40 +0000 (17:43 +0200)
committer   Greg Kroah-Hartman <gregkh@linuxfoundation.org>
            Tue, 8 Aug 2023 18:03:50 +0000 (20:03 +0200)
Upstream commit: 1b5277c0ea0b247393a9c426769fde18cff5e2f6

Add support for the CPUID flag which denotes that the CPU is not
affected by SRSO.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kvm/cpuid.c
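
The new flags added below live in CPUID feature word 20, which cpufeatures.h maps to leaf 0x80000021 (EAX); on that reading, SBPB, IBPB_BRTYPE and SRSO_NO are EAX bits 27, 28 and 29. A minimal userspace sketch of checking them follows — an illustration only, not part of the patch, and the leaf/bit mapping is inferred from the feature numbers in the first hunk:

/* srso_no_check.c - sketch: query the SRSO-related CPUID bits.
 * Assumes cpufeatures.h word 20 == CPUID leaf 0x80000021 EAX, so SBPB,
 * IBPB_BRTYPE and SRSO_NO are EAX bits 27, 28 and 29 of that leaf.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* __get_cpuid() returns 0 if the extended leaf is not supported. */
	if (!__get_cpuid(0x80000021, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000021 not available");
		return 1;
	}
	printf("SBPB:        %s\n", (eax & (1u << 27)) ? "yes" : "no");
	printf("IBPB_BRTYPE: %s\n", (eax & (1u << 28)) ? "yes" : "no");
	printf("SRSO_NO:     %s\n", (eax & (1u << 29)) ? "yes" : "no");
	return 0;
}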

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1c330be128244862bae4aa078ff0d54dae9e7199..a8c2733c816cdf01e7da3f08986589f45e488909 100644
 #define X86_FEATURE_V_TSC_AUX          (19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT       (19*32+10) /* "" AMD hardware-enforced cache coherency */
 
+#define X86_FEATURE_SBPB               (20*32+27) /* "" Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE                (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_SRSO_NO            (20*32+29) /* "" CPU is not affected by SRSO */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index a070e55ba441ab78ac52f468afb26a468c7472e9..52d8c67d930810bc87ca8a92eeec6a67a3101a9b 100644
@@ -60,6 +60,7 @@
 
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  BIT(0)     /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB                  BIT(7)     /* Selective Branch Prediction Barrier */
 
 #define MSR_PPIN_CTL                   0x0000004e
 #define MSR_PPIN                       0x0000004f
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 690b9f983e41435f6917421c123729fa24517cef..573a137f5ac3f4dd0acbb8208e489242ad6b0d8a 100644
@@ -318,11 +318,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
                : "memory");
 }
 
+extern u64 x86_pred_cmd;
+
 static inline void indirect_branch_prediction_barrier(void)
 {
-       u64 val = PRED_CMD_IBPB;
-
-       alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+       alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
 }
 
 /* The Intel SPEC CTRL MSR base value cache */
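
The hunk above replaces the hard-coded PRED_CMD_IBPB value with the new x86_pred_cmd global so that bugs.c (further down) can downgrade the barrier to the lighter SBPB when the CPU reports SRSO_NO or the SRSO mitigation is disabled. A rough userspace model of that indirection, with a printf standing in for the MSR write and simplified arguments in place of the real kernel state:

/* pred_cmd_model.c - userspace model of the x86_pred_cmd indirection.
 * Not kernel code: the MSR write is replaced by a printf, and the two
 * booleans stand in for X86_FEATURE_SRSO_NO / SRSO_CMD_OFF handling.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PRED_CMD_IBPB  (1u << 0)	/* full barrier: flush all predictions  */
#define PRED_CMD_SBPB  (1u << 7)	/* selective barrier: branch types only */

/* Boot-time default, as in bugs.c: full IBPB until proven unnecessary. */
static uint64_t x86_pred_cmd = PRED_CMD_IBPB;

/* Mirrors the pred_cmd: label added to srso_select_mitigation(). */
static void srso_select_mitigation(bool srso_no, bool srso_off)
{
	if (srso_no || srso_off)
		x86_pred_cmd = PRED_CMD_SBPB;
}

/* Mirrors indirect_branch_prediction_barrier(): always writes the global. */
static void indirect_branch_prediction_barrier(void)
{
	printf("wrmsr(MSR_IA32_PRED_CMD, 0x%llx)\n",
	       (unsigned long long)x86_pred_cmd);
}

int main(void)
{
	indirect_branch_prediction_barrier();	/* 0x1: full IBPB          */
	srso_select_mitigation(true, false);	/* CPU reports SRSO_NO     */
	indirect_branch_prediction_barrier();	/* 0x80: SBPB is enough    */
	return 0;
}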
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 04249047df868228991c823de03a2f9c8cee8063..9842f2402978d7692dc2642fc5350edf7d58a432 100644
@@ -1249,14 +1249,14 @@ bool cpu_has_ibpb_brtype_microcode(void)
 {
        u8 fam = boot_cpu_data.x86;
 
-       if (fam == 0x17) {
-               /* Zen1/2 IBPB flushes branch type predictions too. */
+       /* Zen1/2 IBPB flushes branch type predictions too. */
+       if (fam == 0x17)
                return boot_cpu_has(X86_FEATURE_AMD_IBPB);
-       } else if (fam == 0x19) {
+       /* Poke the MSR bit on Zen3/4 to check its presence. */
+       else if (fam == 0x19)
+               return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB);
+       else
                return false;
-       }
-
-       return false;
 }
 
 static void zenbleed_check_cpu(void *unused)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 137565e7f7de46a1927297ba8b43b1bca88926d0..1be4f7186ba88acad28789c66416a218d91bed5b 100644
@@ -57,6 +57,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
@@ -2354,7 +2357,7 @@ static void __init srso_select_mitigation(void)
        bool has_microcode;
 
        if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
-               return;
+               goto pred_cmd;
 
        /*
         * The first check is for the kernel running as a guest in order
@@ -2367,9 +2370,18 @@ static void __init srso_select_mitigation(void)
        } else {
                /*
                 * Enable the synthetic (even if in a real CPUID leaf)
-                * flag for guests.
+                * flags for guests.
                 */
                setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+               setup_force_cpu_cap(X86_FEATURE_SBPB);
+
+               /*
+                * Zen1/2 with SMT off aren't vulnerable after the right
+                * IBPB microcode has been applied.
+                */
+               if ((boot_cpu_data.x86 < 0x19) &&
+                   (cpu_smt_control == CPU_SMT_DISABLED))
+                       setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
        }
 
        switch (srso_cmd) {
@@ -2392,16 +2404,20 @@ static void __init srso_select_mitigation(void)
                        srso_mitigation = SRSO_MITIGATION_SAFE_RET;
                } else {
                        pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
-                       return;
+                       goto pred_cmd;
                }
                break;
 
        default:
                break;
-
        }
 
        pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
+
+pred_cmd:
+       if (boot_cpu_has(X86_FEATURE_SRSO_NO) ||
+           srso_cmd == SRSO_CMD_OFF)
+               x86_pred_cmd = PRED_CMD_SBPB;
 }
 
 #undef pr_fmt
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 44dcb2c83ba4ad5b7c019c4cd225eb2e158f7bb8..f63c8427cf633f1b5cb25903323268c5d5d6c74c 100644
@@ -1414,8 +1414,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
            boot_cpu_has(X86_FEATURE_AVX))
                setup_force_cpu_bug(X86_BUG_GDS);
 
-       if (cpu_matches(cpu_vuln_blacklist, SRSO))
-               setup_force_cpu_bug(X86_BUG_SRSO);
+       if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
+               if (cpu_matches(cpu_vuln_blacklist, SRSO))
+                       setup_force_cpu_bug(X86_BUG_SRSO);
+       }
 
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 6047dbe048803bddddac1ed5b93117bc2ef514c6..7b4224f5ee2de81726949f049565d941fe1108b0 100644
@@ -736,6 +736,9 @@ void kvm_set_cpu_caps(void)
                F(PMM) | F(PMM_EN)
        );
 
+       if (cpu_feature_enabled(X86_FEATURE_SRSO_NO))
+               kvm_cpu_cap_set(X86_FEATURE_SRSO_NO);
+
        /*
         * Hide RDTSCP and RDPID if either feature is reported as supported but
         * probing MSR_TSC_AUX failed.  This is purely a sanity check and