git.ipfire.org Git - people/arne_f/kernel.git/commitdiff
x86/microcode/AMD: Check patch level only on the BSP
author: Borislav Petkov <bp@suse.de>
Fri, 20 Jan 2017 20:29:51 +0000 (21:29 +0100)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 23 Jan 2017 09:02:50 +0000 (10:02 +0100)
Check final patch levels for AMD only on the BSP. This way, we decide
early and only once whether to continue loading or to leave the loader
disabled on such systems.

Simplify a lot.

Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/20170120202955.4091-13-bp@alien8.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/microcode_amd.h
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c

index 3e3e20be829a4ea5f3f795aa91016b38b932bb44..3d57009e168bb7241d3c1fc7e24cac6daedea031 100644 (file)
@@ -54,6 +54,4 @@ static inline int __init
 save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
 void reload_ucode_amd(void) {}
 #endif
-
-extern bool check_current_patch_level(u32 *rev, bool early);
 #endif /* _ASM_X86_MICROCODE_AMD_H */
index 7727f278de58ba16950d14709d2b73547f02514b..61743476c25b343a61ec242db8603230a9cddd05 100644 (file)
@@ -207,7 +207,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
        struct cont_desc desc = { 0 };
        u8 (*patch)[PATCH_MAX_SIZE];
        struct microcode_amd *mc;
-       u32 rev, *new_rev;
+       u32 rev, dummy, *new_rev;
        bool ret = false;
 
 #ifdef CONFIG_X86_32
@@ -218,9 +218,6 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
        patch   = &amd_ucode_patch;
 #endif
 
-       if (check_current_patch_level(&rev, true))
-               return false;
-
        desc.cpuid_1_eax = cpuid_1_eax;
 
        scan_containers(ucode, size, &desc);
@@ -231,6 +228,7 @@ apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size,
        if (!mc)
                return ret;
 
+       native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
        if (rev >= mc->hdr.patch_id)
                return ret;
 
@@ -328,13 +326,8 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 {
        struct equiv_cpu_entry *eq;
        struct microcode_amd *mc;
-       u32 rev;
        u16 eq_id;
 
-       /* 64-bit runs with paging enabled, thus early==false. */
-       if (check_current_patch_level(&rev, false))
-               return;
-
        /* First AP hasn't cached it yet, go through the blob. */
        if (!cont.data) {
                struct cpio_data cp;
@@ -371,6 +364,10 @@ reget:
                return;
 
        if (eq_id == cont.eq_id) {
+               u32 rev, dummy;
+
+               native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
                mc = (struct microcode_amd *)amd_ucode_patch;
 
                if (mc && rev < mc->hdr.patch_id) {
@@ -436,19 +433,14 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 void reload_ucode_amd(void)
 {
        struct microcode_amd *mc;
-       u32 rev;
-
-       /*
-        * early==false because this is a syscore ->resume path and by
-        * that time paging is long enabled.
-        */
-       if (check_current_patch_level(&rev, false))
-               return;
+       u32 rev, dummy;
 
        mc = (struct microcode_amd *)amd_ucode_patch;
        if (!mc)
                return;
 
+       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+
        if (rev < mc->hdr.patch_id) {
                if (!__apply_microcode_amd(mc)) {
                        ucode_new_rev = mc->hdr.patch_id;
@@ -586,60 +578,13 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
        return patch_size;
 }
 
-/*
- * Those patch levels cannot be updated to newer ones and thus should be final.
- */
-static u32 final_levels[] = {
-       0x01000098,
-       0x0100009f,
-       0x010000af,
-       0, /* T-101 terminator */
-};
-
-/*
- * Check the current patch level on this CPU.
- *
- * @rev: Use it to return the patch level. It is set to 0 in the case of
- * error.
- *
- * Returns:
- *  - true: if update should stop
- *  - false: otherwise
- */
-bool check_current_patch_level(u32 *rev, bool early)
-{
-       u32 lvl, dummy, i;
-       bool ret = false;
-       u32 *levels;
-
-       native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
-
-       if (IS_ENABLED(CONFIG_X86_32) && early)
-               levels = (u32 *)__pa_nodebug(&final_levels);
-       else
-               levels = final_levels;
-
-       for (i = 0; levels[i]; i++) {
-               if (lvl == levels[i]) {
-                       lvl = 0;
-                       ret = true;
-                       break;
-               }
-       }
-
-       if (rev)
-               *rev = lvl;
-
-       return ret;
-}
-
 static int apply_microcode_amd(int cpu)
 {
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
-       u32 rev;
+       u32 rev, dummy;
 
        BUG_ON(raw_smp_processor_id() != cpu);
 
@@ -652,8 +597,7 @@ static int apply_microcode_amd(int cpu)
        mc_amd  = p->data;
        uci->mc = p->data;
 
-       if (check_current_patch_level(&rev, false))
-               return -1;
+       rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
index dc54518299c42351318ca72ef5623e3acabbdc38..3b74d2f315d3bde513878c64ca1379cfb1e78887 100644 (file)
@@ -69,6 +69,42 @@ struct cpu_info_ctx {
        int                     err;
 };
 
+/*
+ * Those patch levels cannot be updated to newer ones and thus should be final.
+ */
+static u32 final_levels[] = {
+       0x01000098,
+       0x0100009f,
+       0x010000af,
+       0, /* T-101 terminator */
+};
+
+/*
+ * Check the current patch level on this CPU.
+ *
+ * Returns:
+ *  - true: if update should stop
+ *  - false: otherwise
+ */
+static bool amd_check_current_patch_level(void)
+{
+       u32 lvl, dummy, i;
+       u32 *levels;
+
+       native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
+
+       if (IS_ENABLED(CONFIG_X86_32))
+               levels = (u32 *)__pa_nodebug(&final_levels);
+       else
+               levels = final_levels;
+
+       for (i = 0; levels[i]; i++) {
+               if (lvl == levels[i])
+                       return true;
+       }
+       return false;
+}
+
 static bool __init check_loader_disabled_bsp(void)
 {
        static const char *__dis_opt_str = "dis_ucode_ldr";
@@ -95,6 +131,11 @@ static bool __init check_loader_disabled_bsp(void)
        if (native_cpuid_ecx(1) & BIT(31))
                return *res;
 
+       if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
+               if (amd_check_current_patch_level())
+                       return *res;
+       }
+
        if (cmdline_find_option_bool(cmdline, option) <= 0)
                *res = false;