x86/mce: Define BSP-only init
author    Yazen Ghannam <yazen.ghannam@amd.com>
          Mon, 8 Sep 2025 15:40:31 +0000 (15:40 +0000)
committer Borislav Petkov (AMD) <bp@alien8.de>
          Thu, 11 Sep 2025 10:22:37 +0000 (12:22 +0200)
Currently, MCA initialization is executed identically on each CPU as
it is brought online. However, a number of MCA initialization tasks
only need to be done once.

Define a function to collect all 'global' init tasks and call this from
the BSP only. Start with CPU features.
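
To make the split concrete, below is a minimal userspace C sketch of
the pattern this patch introduces: state that is identical on every
CPU is derived exactly once on the boot CPU, and the per-CPU path only
consumes it. The names (bsp_init, cpu_init, cfg) and the fabricated
MCG_CAP value are illustrative stand-ins, not the kernel's; only the
MCG_EXT_P/MCG_SER_P bit positions mirror the IA32_MCG_CAP layout.

    /*
     * Illustrative sketch only: one-time "BSP" discovery vs. a
     * per-CPU path that merely consumes the global result.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MCG_EXT_P      (1ULL << 9)          /* extended registers present */
    #define MCG_SER_P      (1ULL << 24)         /* software error recovery    */
    #define MCG_EXT_CNT(c) (((c) >> 16) & 0xff) /* number of extended regs    */

    static struct {
        bool ser;
        bool rip_msr;
    } cfg;                                      /* written once, read per CPU */

    /* Runs once on the boot CPU: derive state that is the same everywhere. */
    static void bsp_init(uint64_t cap)
    {
        if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
            cfg.rip_msr = true;                 /* accurate RIP reporting */
        if (cap & MCG_SER_P)
            cfg.ser = true;
    }

    /* Runs on every CPU: no rediscovery, just consume the global config. */
    static void cpu_init(int cpu)
    {
        printf("cpu%d: ser=%d rip_msr=%d\n", cpu, cfg.ser, cfg.rip_msr);
    }

    int main(void)
    {
        uint64_t fake_cap = MCG_SER_P | MCG_EXT_P | (10ULL << 16);

        bsp_init(fake_cap);                     /* once, on the "BSP" */
        for (int cpu = 0; cpu < 4; cpu++)
            cpu_init(cpu);                      /* per CPU */
        return 0;
    }

In the actual diff below, the same two MCG_CAP checks and the three
mce_flags feature reads move out of the per-CPU paths
(__mcheck_cpu_cap_init() and mce_amd_feature_init()) into the new
mca_bsp_init(), which early_identify_cpu() calls once during boot.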

Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Link: https://lore.kernel.org/20250908-wip-mca-updates-v6-0-eef5d6c74b9c@amd.com
arch/x86/include/asm/mce.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mce/amd.c
arch/x86/kernel/cpu/mce/core.c

index 3224f3862dc86f537fff7ada05465472605993bb..31e3cb550fb3f89f84977e2a1c9c840187fa884c 100644 (file)
@@ -241,12 +241,14 @@ struct cper_ia_proc_ctx;
 
 #ifdef CONFIG_X86_MCE
 int mcheck_init(void);
+void mca_bsp_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_init(struct cpuinfo_x86 *c);
 void mcheck_cpu_clear(struct cpuinfo_x86 *c);
 int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
                               u64 lapic_id);
 #else
 static inline int mcheck_init(void) { return 0; }
+static inline void mca_bsp_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
 static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
 static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
index 34a054181c4dc4bcb998e2395c9c7aefeb778e54..8bbfde05f04fb19778c9511f0986aff28b70c73c 100644 (file)
@@ -1784,6 +1784,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_LA57);
 
        detect_nopl();
+       mca_bsp_init(c);
 }
 
 void __init init_cpu_devs(void)
index aa13c9304ad892639595851f55fe1c5f71b434ec..3c6c19eb0a1807334bcabcc458d485f7751c3a5a 100644 (file)
@@ -653,9 +653,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
        u32 low = 0, high = 0, address = 0;
        int offset = -1;
 
-       mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
-       mce_flags.succor         = cpu_feature_enabled(X86_FEATURE_SUCCOR);
-       mce_flags.smca           = cpu_feature_enabled(X86_FEATURE_SMCA);
        mce_flags.amd_threshold  = 1;
 
        for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) {
index 9e31834b3542c33bb41454d6e2b33bf2b184935b..79f3dd7f7851a8dda58e3c9d2f70815fee0a3194 100644 (file)
@@ -1837,13 +1837,6 @@ static void __mcheck_cpu_cap_init(void)
        this_cpu_write(mce_num_banks, b);
 
        __mcheck_cpu_mce_banks_init();
-
-       /* Use accurate RIP reporting if available. */
-       if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
-               mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
-
-       if (cap & MCG_SER_P)
-               mca_cfg.ser = 1;
 }
 
 static void __mcheck_cpu_init_generic(void)
@@ -2240,6 +2233,27 @@ DEFINE_IDTENTRY_RAW(exc_machine_check)
 }
 #endif
 
+void mca_bsp_init(struct cpuinfo_x86 *c)
+{
+       u64 cap;
+
+       if (!mce_available(c))
+               return;
+
+       mce_flags.overflow_recov = cpu_feature_enabled(X86_FEATURE_OVERFLOW_RECOV);
+       mce_flags.succor         = cpu_feature_enabled(X86_FEATURE_SUCCOR);
+       mce_flags.smca           = cpu_feature_enabled(X86_FEATURE_SMCA);
+
+       rdmsrq(MSR_IA32_MCG_CAP, cap);
+
+       /* Use accurate RIP reporting if available. */
+       if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
+               mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
+
+       if (cap & MCG_SER_P)
+               mca_cfg.ser = 1;
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off: