git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/bugs: Move cpu_bugs_smt_update() down
Author: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Thu, 14 Aug 2025 17:20:43 +0000 (10:20 -0700)
Committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Sep 2025 15:21:46 +0000 (17:21 +0200)
Commit 6449f5baf9c78a7a442d64f4a61378a21c5db113 upstream.

cpu_bugs_smt_update() uses global variables from different mitigations. For
SMT updates it can't currently use vmscape_mitigation that is defined after
it.

Since cpu_bugs_smt_update() depends on many other mitigations, move it
after all mitigations are defined. With that, it can use vmscape_mitigation
in a moment.

No functional change.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kernel/cpu/bugs.c

index cc035afd25743d2dc260f575ed49f1c6967ea556..9e956614f9517a37f7a517c4a2e4457de6dcbfcd 100644 (file)
@@ -2202,80 +2202,6 @@ out:
        pr_info("%s\n", tsa_strings[tsa_mitigation]);
 }
 
-void cpu_bugs_smt_update(void)
-{
-       mutex_lock(&spec_ctrl_mutex);
-
-       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
-           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
-               pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
-
-       switch (spectre_v2_user_stibp) {
-       case SPECTRE_V2_USER_NONE:
-               break;
-       case SPECTRE_V2_USER_STRICT:
-       case SPECTRE_V2_USER_STRICT_PREFERRED:
-               update_stibp_strict();
-               break;
-       case SPECTRE_V2_USER_PRCTL:
-       case SPECTRE_V2_USER_SECCOMP:
-               update_indir_branch_cond();
-               break;
-       }
-
-       switch (mds_mitigation) {
-       case MDS_MITIGATION_FULL:
-       case MDS_MITIGATION_VMWERV:
-               if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
-                       pr_warn_once(MDS_MSG_SMT);
-               update_mds_branch_idle();
-               break;
-       case MDS_MITIGATION_OFF:
-               break;
-       }
-
-       switch (taa_mitigation) {
-       case TAA_MITIGATION_VERW:
-       case TAA_MITIGATION_UCODE_NEEDED:
-               if (sched_smt_active())
-                       pr_warn_once(TAA_MSG_SMT);
-               break;
-       case TAA_MITIGATION_TSX_DISABLED:
-       case TAA_MITIGATION_OFF:
-               break;
-       }
-
-       switch (mmio_mitigation) {
-       case MMIO_MITIGATION_VERW:
-       case MMIO_MITIGATION_UCODE_NEEDED:
-               if (sched_smt_active())
-                       pr_warn_once(MMIO_MSG_SMT);
-               break;
-       case MMIO_MITIGATION_OFF:
-               break;
-       }
-
-       switch (tsa_mitigation) {
-       case TSA_MITIGATION_USER_KERNEL:
-       case TSA_MITIGATION_VM:
-       case TSA_MITIGATION_FULL:
-       case TSA_MITIGATION_UCODE_NEEDED:
-               /*
-                * TSA-SQ can potentially lead to info leakage between
-                * SMT threads.
-                */
-               if (sched_smt_active())
-                       static_branch_enable(&cpu_buf_idle_clear);
-               else
-                       static_branch_disable(&cpu_buf_idle_clear);
-               break;
-       case TSA_MITIGATION_NONE:
-               break;
-       }
-
-       mutex_unlock(&spec_ctrl_mutex);
-}
-
 #undef pr_fmt
 #define pr_fmt(fmt)    "Speculative Store Bypass: " fmt
 
@@ -3025,6 +2951,80 @@ static void __init vmscape_select_mitigation(void)
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
+void cpu_bugs_smt_update(void)
+{
+       mutex_lock(&spec_ctrl_mutex);
+
+       if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+           spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+               pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+
+       switch (spectre_v2_user_stibp) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+       case SPECTRE_V2_USER_STRICT:
+       case SPECTRE_V2_USER_STRICT_PREFERRED:
+               update_stibp_strict();
+               break;
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               update_indir_branch_cond();
+               break;
+       }
+
+       switch (mds_mitigation) {
+       case MDS_MITIGATION_FULL:
+       case MDS_MITIGATION_VMWERV:
+               if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+                       pr_warn_once(MDS_MSG_SMT);
+               update_mds_branch_idle();
+               break;
+       case MDS_MITIGATION_OFF:
+               break;
+       }
+
+       switch (taa_mitigation) {
+       case TAA_MITIGATION_VERW:
+       case TAA_MITIGATION_UCODE_NEEDED:
+               if (sched_smt_active())
+                       pr_warn_once(TAA_MSG_SMT);
+               break;
+       case TAA_MITIGATION_TSX_DISABLED:
+       case TAA_MITIGATION_OFF:
+               break;
+       }
+
+       switch (mmio_mitigation) {
+       case MMIO_MITIGATION_VERW:
+       case MMIO_MITIGATION_UCODE_NEEDED:
+               if (sched_smt_active())
+                       pr_warn_once(MMIO_MSG_SMT);
+               break;
+       case MMIO_MITIGATION_OFF:
+               break;
+       }
+
+       switch (tsa_mitigation) {
+       case TSA_MITIGATION_USER_KERNEL:
+       case TSA_MITIGATION_VM:
+       case TSA_MITIGATION_FULL:
+       case TSA_MITIGATION_UCODE_NEEDED:
+               /*
+                * TSA-SQ can potentially lead to info leakage between
+                * SMT threads.
+                */
+               if (sched_smt_active())
+                       static_branch_enable(&cpu_buf_idle_clear);
+               else
+                       static_branch_disable(&cpu_buf_idle_clear);
+               break;
+       case TSA_MITIGATION_NONE:
+               break;
+       }
+
+       mutex_unlock(&spec_ctrl_mutex);
+}
+
 #ifdef CONFIG_SYSFS
 
 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"