git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.15-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Apr 2018 14:06:07 +0000 (16:06 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 10 Apr 2018 14:06:07 +0000 (16:06 +0200)
added patches:
x86-cpu-add-a-microcode-loader-callback.patch
x86-cpu-check-cpu-feature-bits-after-microcode-upgrade.patch
x86-microcode-attempt-late-loading-only-when-new-microcode-is-present.patch
x86-microcode-do-not-upload-microcode-if-cpus-are-offline.patch
x86-microcode-fix-cpu-synchronization-routine.patch
x86-microcode-get-rid-of-struct-apply_microcode_ctx.patch
x86-microcode-intel-check-microcode-revision-before-updating-sibling-threads.patch
x86-microcode-intel-look-into-the-patch-cache-first.patch
x86-microcode-intel-writeback-and-invalidate-caches-before-updating-microcode.patch
x86-microcode-propagate-return-value-from-updating-functions.patch
x86-microcode-request-microcode-on-the-bsp.patch
x86-microcode-synchronize-late-microcode-loading.patch

13 files changed:
queue-4.15/series
queue-4.15/x86-cpu-add-a-microcode-loader-callback.patch [new file with mode: 0644]
queue-4.15/x86-cpu-check-cpu-feature-bits-after-microcode-upgrade.patch [new file with mode: 0644]
queue-4.15/x86-microcode-attempt-late-loading-only-when-new-microcode-is-present.patch [new file with mode: 0644]
queue-4.15/x86-microcode-do-not-upload-microcode-if-cpus-are-offline.patch [new file with mode: 0644]
queue-4.15/x86-microcode-fix-cpu-synchronization-routine.patch [new file with mode: 0644]
queue-4.15/x86-microcode-get-rid-of-struct-apply_microcode_ctx.patch [new file with mode: 0644]
queue-4.15/x86-microcode-intel-check-microcode-revision-before-updating-sibling-threads.patch [new file with mode: 0644]
queue-4.15/x86-microcode-intel-look-into-the-patch-cache-first.patch [new file with mode: 0644]
queue-4.15/x86-microcode-intel-writeback-and-invalidate-caches-before-updating-microcode.patch [new file with mode: 0644]
queue-4.15/x86-microcode-propagate-return-value-from-updating-functions.patch [new file with mode: 0644]
queue-4.15/x86-microcode-request-microcode-on-the-bsp.patch [new file with mode: 0644]
queue-4.15/x86-microcode-synchronize-late-microcode-loading.patch [new file with mode: 0644]

index af2d344cf35f83c5fea532b0be19c361be07a328..e74d5885cf0ce01ad6a17b78838d3f18388c0e51 100644 (file)
@@ -110,3 +110,15 @@ tcmu-release-blocks-for-partially-setup-cmds.patch
 thermal-int3400_thermal-fix-error-handling-in-int3400_thermal_probe.patch
 drm-i915-cnp-ignore-vbt-request-for-know-invalid-ddc-pin.patch
 drm-i915-cnp-properly-handle-vbt-ddc-pin-out-of-bounds.patch
+x86-microcode-propagate-return-value-from-updating-functions.patch
+x86-cpu-add-a-microcode-loader-callback.patch
+x86-cpu-check-cpu-feature-bits-after-microcode-upgrade.patch
+x86-microcode-get-rid-of-struct-apply_microcode_ctx.patch
+x86-microcode-intel-check-microcode-revision-before-updating-sibling-threads.patch
+x86-microcode-intel-writeback-and-invalidate-caches-before-updating-microcode.patch
+x86-microcode-do-not-upload-microcode-if-cpus-are-offline.patch
+x86-microcode-intel-look-into-the-patch-cache-first.patch
+x86-microcode-request-microcode-on-the-bsp.patch
+x86-microcode-synchronize-late-microcode-loading.patch
+x86-microcode-attempt-late-loading-only-when-new-microcode-is-present.patch
+x86-microcode-fix-cpu-synchronization-routine.patch
diff --git a/queue-4.15/x86-cpu-add-a-microcode-loader-callback.patch b/queue-4.15/x86-cpu-add-a-microcode-loader-callback.patch
new file mode 100644 (file)
index 0000000..9c8f291
--- /dev/null
@@ -0,0 +1,87 @@
+From 1008c52c09dcb23d93f8e0ea83a6246265d2cce0 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 16 Feb 2018 12:26:39 +0100
+Subject: x86/CPU: Add a microcode loader callback
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 1008c52c09dcb23d93f8e0ea83a6246265d2cce0 upstream.
+
+Add a callback function which the microcode loader calls when microcode
+has been updated to a newer revision. Do the callback only when no error
+was encountered during loading.
+
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180216112640.11554-3-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/processor.h     |    1 +
+ arch/x86/kernel/cpu/common.c         |   10 ++++++++++
+ arch/x86/kernel/cpu/microcode/core.c |    8 ++++++--
+ 3 files changed, 17 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -969,4 +969,5 @@ bool xen_set_default_idle(void);
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
++void microcode_check(void);
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1749,3 +1749,13 @@ static int __init init_cpu_syscore(void)
+       return 0;
+ }
+ core_initcall(init_cpu_syscore);
++
++/*
++ * The microcode loader calls this upon late microcode load to recheck features,
++ * only when microcode has been updated. Caller holds microcode_mutex and CPU
++ * hotplug lock.
++ */
++void microcode_check(void)
++{
++      perf_check_microcode();
++}
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -509,6 +509,7 @@ static ssize_t reload_store(struct devic
+                           const char *buf, size_t size)
+ {
+       enum ucode_state tmp_ret = UCODE_OK;
++      bool do_callback = false;
+       unsigned long val;
+       ssize_t ret = 0;
+       int cpu;
+@@ -531,10 +532,13 @@ static ssize_t reload_store(struct devic
+                       if (!ret)
+                               ret = -EINVAL;
+               }
++
++              if (tmp_ret == UCODE_UPDATED)
++                      do_callback = true;
+       }
+-      if (!ret && tmp_ret == UCODE_UPDATED)
+-              perf_check_microcode();
++      if (!ret && do_callback)
++              microcode_check();
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
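A minimal user-space C sketch of the pattern the patch above establishes, namely invoking a separate re-check routine only when no CPU failed and at least one actually received an update. All names here (reload_all(), recheck_features(), fake_reload()) are illustrative stand-ins rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>

enum update_state { UPDATE_OK, UPDATE_UPDATED, UPDATE_ERROR };

/* Stands in for microcode_check(): re-examine state after an update. */
static void recheck_features(void)
{
        printf("re-reading feature state after a successful update\n");
}

/* Stands in for the per-CPU reload loop in reload_store(). */
static int reload_all(int ncpus, enum update_state (*reload_one)(int cpu))
{
        bool do_callback = false;
        int err = 0;

        for (int cpu = 0; cpu < ncpus; cpu++) {
                enum update_state s = reload_one(cpu);

                if (s == UPDATE_ERROR && !err)
                        err = -1;               /* remember the first failure */
                if (s == UPDATE_UPDATED)
                        do_callback = true;     /* at least one CPU got new code */
        }

        /* Call back only when nothing failed and something actually changed. */
        if (!err && do_callback)
                recheck_features();

        return err;
}

static enum update_state fake_reload(int cpu)
{
        return cpu == 0 ? UPDATE_UPDATED : UPDATE_OK;
}

int main(void)
{
        return reload_all(4, fake_reload) ? 1 : 0;
}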
diff --git a/queue-4.15/x86-cpu-check-cpu-feature-bits-after-microcode-upgrade.patch b/queue-4.15/x86-cpu-check-cpu-feature-bits-after-microcode-upgrade.patch
new file mode 100644 (file)
index 0000000..7544523
--- /dev/null
@@ -0,0 +1,65 @@
+From 42ca8082e260dcfd8afa2afa6ec1940b9d41724c Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 16 Feb 2018 12:26:40 +0100
+Subject: x86/CPU: Check CPU feature bits after microcode upgrade
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 42ca8082e260dcfd8afa2afa6ec1940b9d41724c upstream.
+
+With some microcode upgrades, new CPUID features can become visible on
+the CPU. Check what the kernel has mirrored now and issue a warning
+hinting at possible things the user/admin can do to make use of the
+newly visible features.
+
+Originally-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180216112640.11554-4-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/common.c |   20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1757,5 +1757,25 @@ core_initcall(init_cpu_syscore);
+  */
+ void microcode_check(void)
+ {
++      struct cpuinfo_x86 info;
++
+       perf_check_microcode();
++
++      /* Reload CPUID max function as it might've changed. */
++      info.cpuid_level = cpuid_eax(0);
++
++      /*
++       * Copy all capability leafs to pick up the synthetic ones so that
++       * memcmp() below doesn't fail on that. The ones coming from CPUID will
++       * get overwritten in get_cpu_cap().
++       */
++      memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
++
++      get_cpu_cap(&info);
++
++      if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
++              return;
++
++      pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
++      pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+ }
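The core of the patch above is a memcmp() of the boot-time capability snapshot against a freshly populated copy. Below is a rough user-space sketch of that comparison; the bitmap size and the fake read_current_caps() standing in for get_cpu_cap() are assumptions of the sketch, not kernel values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NCAPWORDS 20    /* illustrative number of 32-bit capability words */

static uint32_t boot_caps[NCAPWORDS];   /* snapshot mirrored at boot */

/* Stand-in for get_cpu_cap(): re-read the CPUID-derived feature words. */
static void read_current_caps(uint32_t *caps)
{
        caps[7] |= 1u << 26;            /* pretend the update exposed a new bit */
}

static void check_caps_after_update(void)
{
        uint32_t now[NCAPWORDS];

        /* Start from the boot copy so synthetic (non-CPUID) bits still match. */
        memcpy(now, boot_caps, sizeof(now));
        read_current_caps(now);

        if (!memcmp(now, boot_caps, sizeof(now)))
                return;                 /* nothing new became visible */

        fprintf(stderr, "CPU features changed after loading microcode, "
                        "but might not take effect.\n");
        fprintf(stderr, "Consider early loading (initrd/built-in) or a BIOS update.\n");
}

int main(void)
{
        check_caps_after_update();
        return 0;
}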
diff --git a/queue-4.15/x86-microcode-attempt-late-loading-only-when-new-microcode-is-present.patch b/queue-4.15/x86-microcode-attempt-late-loading-only-when-new-microcode-is-present.patch
new file mode 100644 (file)
index 0000000..2619d6a
--- /dev/null
@@ -0,0 +1,148 @@
+From 2613f36ed965d0e5a595a1d931fd3b480e82d6fd Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 14 Mar 2018 19:36:14 +0100
+Subject: x86/microcode: Attempt late loading only when new microcode is present
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 2613f36ed965d0e5a595a1d931fd3b480e82d6fd upstream.
+
+Return UCODE_NEW from the scanning functions to denote that new microcode
+was found and only then attempt the expensive synchronization dance.
+
+Reported-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lkml.kernel.org/r/20180314183615.17629-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/microcode.h      |    1 +
+ arch/x86/kernel/cpu/microcode/amd.c   |   34 +++++++++++++++++++++-------------
+ arch/x86/kernel/cpu/microcode/core.c  |    8 +++-----
+ arch/x86/kernel/cpu/microcode/intel.c |    4 +++-
+ 4 files changed, 28 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -39,6 +39,7 @@ struct device;
+ enum ucode_state {
+       UCODE_OK        = 0,
++      UCODE_NEW,
+       UCODE_UPDATED,
+       UCODE_NFOUND,
+       UCODE_ERROR,
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(
+               return -EINVAL;
+       ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
+-      if (ret != UCODE_OK)
++      if (ret > UCODE_UPDATED)
+               return -EINVAL;
+       return 0;
+@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode
+ static enum ucode_state
+ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
+ {
++      struct ucode_patch *p;
+       enum ucode_state ret;
+       /* free old equiv table */
+       free_equiv_cpu_table();
+       ret = __load_microcode_amd(family, data, size);
+-
+-      if (ret != UCODE_OK)
++      if (ret != UCODE_OK) {
+               cleanup();
++              return ret;
++      }
+-#ifdef CONFIG_X86_32
+-      /* save BSP's matching patch for early load */
+-      if (save) {
+-              struct ucode_patch *p = find_patch(0);
+-              if (p) {
+-                      memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+-                      memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
+-                                                             PATCH_MAX_SIZE));
+-              }
++      p = find_patch(0);
++      if (!p) {
++              return ret;
++      } else {
++              if (boot_cpu_data.microcode == p->patch_id)
++                      return ret;
++
++              ret = UCODE_NEW;
+       }
+-#endif
++
++      /* save BSP's matching patch for early load */
++      if (!save)
++              return ret;
++
++      memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
++      memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
++
+       return ret;
+ }
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -607,7 +607,7 @@ static ssize_t reload_store(struct devic
+               return size;
+       tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+-      if (tmp_ret != UCODE_OK)
++      if (tmp_ret != UCODE_NEW)
+               return size;
+       get_online_cpus();
+@@ -691,10 +691,8 @@ static enum ucode_state microcode_init_c
+       if (system_state != SYSTEM_RUNNING)
+               return UCODE_NFOUND;
+-      ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
+-                                                   refresh_fw);
+-
+-      if (ustate == UCODE_OK) {
++      ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
++      if (ustate == UCODE_NEW) {
+               pr_debug("CPU%d updated upon init\n", cpu);
+               apply_microcode_on_target(cpu);
+       }
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -862,6 +862,7 @@ static enum ucode_state generic_load_mic
+       unsigned int leftover = size;
+       unsigned int curr_mc_size = 0, new_mc_size = 0;
+       unsigned int csig, cpf;
++      enum ucode_state ret = UCODE_OK;
+       while (leftover) {
+               struct microcode_header_intel mc_header;
+@@ -903,6 +904,7 @@ static enum ucode_state generic_load_mic
+                       new_mc  = mc;
+                       new_mc_size = mc_size;
+                       mc = NULL;      /* trigger new vmalloc */
++                      ret = UCODE_NEW;
+               }
+               ucode_ptr += mc_size;
+@@ -932,7 +934,7 @@ static enum ucode_state generic_load_mic
+       pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
+                cpu, new_rev, uci->cpu_sig.rev);
+-      return UCODE_OK;
++      return ret;
+ }
+ static int get_ucode_fw(void *to, const void *from, size_t n)
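The enum ordering introduced by this series is what makes one-comparison checks such as "ret > UCODE_UPDATED" and "tmp_ret > UCODE_NFOUND" elsewhere in the patches work. A small self-contained sketch of that ordering; the enum values mirror the patch, while describe() is purely illustrative:

#include <stdio.h>

enum ucode_state {
        UCODE_OK = 0,   /* blob parsed fine, nothing newer than the running rev */
        UCODE_NEW,      /* newer microcode found in the blob/cache */
        UCODE_UPDATED,  /* microcode was actually written to the CPU */
        UCODE_NFOUND,   /* no matching microcode for this CPU */
        UCODE_ERROR,    /* parse or apply failure */
};

static const char *describe(enum ucode_state s)
{
        if (s > UCODE_NFOUND)
                return "hard error: abort";
        if (s == UCODE_NEW)
                return "new microcode present: worth the late-load dance";
        return "nothing to do";
}

int main(void)
{
        enum ucode_state states[] = { UCODE_OK, UCODE_NEW, UCODE_ERROR };

        for (unsigned int i = 0; i < sizeof(states) / sizeof(states[0]); i++)
                printf("%d: %s\n", states[i], describe(states[i]));
        return 0;
}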
diff --git a/queue-4.15/x86-microcode-do-not-upload-microcode-if-cpus-are-offline.patch b/queue-4.15/x86-microcode-do-not-upload-microcode-if-cpus-are-offline.patch
new file mode 100644 (file)
index 0000000..a9760af
--- /dev/null
@@ -0,0 +1,72 @@
+From 30ec26da9967d0d785abc24073129a34c3211777 Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:43 +0100
+Subject: x86/microcode: Do not upload microcode if CPUs are offline
+
+From: Ashok Raj <ashok.raj@intel.com>
+
+commit 30ec26da9967d0d785abc24073129a34c3211777 upstream.
+
+Avoid loading microcode if any of the CPUs are offline, and issue a
+warning. Having different microcode revisions on the system at any time
+is outright dangerous.
+
+[ Borislav: Massage changelog. ]
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: http://lkml.kernel.org/r/1519352533-15992-4-git-send-email-ashok.raj@intel.com
+Link: https://lkml.kernel.org/r/20180228102846.13447-5-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/core.c |   18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -486,6 +486,16 @@ static void __exit microcode_dev_exit(vo
+ /* fake device for request_firmware */
+ static struct platform_device *microcode_pdev;
++static int check_online_cpus(void)
++{
++      if (num_online_cpus() == num_present_cpus())
++              return 0;
++
++      pr_err("Not all CPUs online, aborting microcode update.\n");
++
++      return -EINVAL;
++}
++
+ static enum ucode_state reload_for_cpu(int cpu)
+ {
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+@@ -519,7 +529,13 @@ static ssize_t reload_store(struct devic
+               return size;
+       get_online_cpus();
++
++      ret = check_online_cpus();
++      if (ret)
++              goto put;
++
+       mutex_lock(&microcode_mutex);
++
+       for_each_online_cpu(cpu) {
+               tmp_ret = reload_for_cpu(cpu);
+               if (tmp_ret > UCODE_NFOUND) {
+@@ -538,6 +554,8 @@ static ssize_t reload_store(struct devic
+               microcode_check();
+       mutex_unlock(&microcode_mutex);
++
++put:
+       put_online_cpus();
+       if (!ret)
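A user-space analogue of the check_online_cpus() guard added above. Using sysconf(_SC_NPROCESSORS_CONF) as an approximation of "present" CPUs is an assumption of this sketch, not an exact equivalent of num_present_cpus():

#include <stdio.h>
#include <unistd.h>

static int check_online_cpus(void)
{
        long online  = sysconf(_SC_NPROCESSORS_ONLN);
        long present = sysconf(_SC_NPROCESSORS_CONF);

        if (online == present)
                return 0;

        fprintf(stderr, "Not all CPUs online (%ld of %ld), aborting update.\n",
                online, present);
        return -1;
}

int main(void)
{
        return check_online_cpus() ? 1 : 0;
}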
diff --git a/queue-4.15/x86-microcode-fix-cpu-synchronization-routine.patch b/queue-4.15/x86-microcode-fix-cpu-synchronization-routine.patch
new file mode 100644 (file)
index 0000000..222672a
--- /dev/null
@@ -0,0 +1,164 @@
+From bb8c13d61a629276a162c1d2b1a20a815cbcfbb7 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 14 Mar 2018 19:36:15 +0100
+Subject: x86/microcode: Fix CPU synchronization routine
+
+From: Borislav Petkov <bp@suse.de>
+
+commit bb8c13d61a629276a162c1d2b1a20a815cbcfbb7 upstream.
+
+Emanuel reported an issue with a hang during microcode update because my
+dumb idea to use one atomic synchronization variable for both rendezvous
+- before and after update - was simply bollocks:
+
+  microcode: microcode_reload_late: late_cpus: 4
+  microcode: __reload_late: cpu 2 entered
+  microcode: __reload_late: cpu 1 entered
+  microcode: __reload_late: cpu 3 entered
+  microcode: __reload_late: cpu 0 entered
+  microcode: __reload_late: cpu 1 left
+  microcode: Timeout while waiting for CPUs rendezvous, remaining: 1
+
+CPU1 above would finish and leave, while the others would still spin
+waiting for it to join.
+
+So do two synchronization atomics instead, which makes the code a lot more
+straightforward.
+
+Also, since the update is serialized and it also takes quite some time per
+microcode engine, increase the exit timeout by the number of CPUs on the
+system.
+
+That's ok because the moment all CPUs are done, that timeout will be cut
+short.
+
+Furthermore, panic when some of the CPUs timeout when returning from a
+microcode update: we can't allow a system with not all cores updated.
+
+Also, as an optimization, do not do the exit sync if microcode wasn't
+updated.
+
+Reported-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Emanuel Czirai <xftroxgpx@protonmail.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Link: https://lkml.kernel.org/r/20180314183615.17629-2-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/core.c |   68 +++++++++++++++++++++--------------
+ 1 file changed, 41 insertions(+), 27 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -517,7 +517,29 @@ static int check_online_cpus(void)
+       return -EINVAL;
+ }
+-static atomic_t late_cpus;
++static atomic_t late_cpus_in;
++static atomic_t late_cpus_out;
++
++static int __wait_for_cpus(atomic_t *t, long long timeout)
++{
++      int all_cpus = num_online_cpus();
++
++      atomic_inc(t);
++
++      while (atomic_read(t) < all_cpus) {
++              if (timeout < SPINUNIT) {
++                      pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
++                              all_cpus - atomic_read(t));
++                      return 1;
++              }
++
++              ndelay(SPINUNIT);
++              timeout -= SPINUNIT;
++
++              touch_nmi_watchdog();
++      }
++      return 0;
++}
+ /*
+  * Returns:
+@@ -527,30 +549,16 @@ static atomic_t late_cpus;
+  */
+ static int __reload_late(void *info)
+ {
+-      unsigned int timeout = NSEC_PER_SEC;
+-      int all_cpus = num_online_cpus();
+       int cpu = smp_processor_id();
+       enum ucode_state err;
+       int ret = 0;
+-      atomic_dec(&late_cpus);
+-
+       /*
+        * Wait for all CPUs to arrive. A load will not be attempted unless all
+        * CPUs show up.
+        * */
+-      while (atomic_read(&late_cpus)) {
+-              if (timeout < SPINUNIT) {
+-                      pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+-                              atomic_read(&late_cpus));
+-                      return -1;
+-              }
+-
+-              ndelay(SPINUNIT);
+-              timeout -= SPINUNIT;
+-
+-              touch_nmi_watchdog();
+-      }
++      if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
++              return -1;
+       spin_lock(&update_lock);
+       apply_microcode_local(&err);
+@@ -558,15 +566,22 @@ static int __reload_late(void *info)
+       if (err > UCODE_NFOUND) {
+               pr_warn("Error reloading microcode on CPU %d\n", cpu);
+-              ret = -1;
+-      } else if (err == UCODE_UPDATED) {
++              return -1;
++      /* siblings return UCODE_OK because their engine got updated already */
++      } else if (err == UCODE_UPDATED || err == UCODE_OK) {
+               ret = 1;
++      } else {
++              return ret;
+       }
+-      atomic_inc(&late_cpus);
+-
+-      while (atomic_read(&late_cpus) != all_cpus)
+-              cpu_relax();
++      /*
++       * Increase the wait timeout to a safe value here since we're
++       * serializing the microcode update and that could take a while on a
++       * large number of CPUs. And that is fine as the *actual* timeout will
++       * be determined by the last CPU finished updating and thus cut short.
++       */
++      if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
++              panic("Timeout during microcode update!\n");
+       return ret;
+ }
+@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
+ {
+       int ret;
+-      atomic_set(&late_cpus, num_online_cpus());
++      atomic_set(&late_cpus_in,  0);
++      atomic_set(&late_cpus_out, 0);
+       ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+-      if (ret < 0)
+-              return ret;
+-      else if (ret > 0)
++      if (ret > 0)
+               microcode_check();
+       return ret;
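The essence of the fix above is replacing one reused rendezvous counter with two: an "in" counter everyone waits on before the update and an "out" counter everyone waits on after it. A compact user-space sketch of that two-phase rendezvous, using C11 atomics and pthreads as stand-ins for the kernel's atomic_t and stop_machine(); the timeout handling is omitted for brevity:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int late_cpus_in;
static atomic_int late_cpus_out;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

/* Bump the counter and spin until every thread has done the same. */
static void wait_for_all(atomic_int *t)
{
        atomic_fetch_add(t, 1);
        while (atomic_load(t) < NTHREADS)
                ;       /* the kernel version also ticks down a timeout here */
}

static void *reload_late(void *arg)
{
        long cpu = (long)arg;

        wait_for_all(&late_cpus_in);            /* phase 1: everyone arrives */

        pthread_mutex_lock(&update_lock);       /* updates stay serialized */
        printf("cpu %ld: applying update\n", cpu);
        pthread_mutex_unlock(&update_lock);

        wait_for_all(&late_cpus_out);           /* phase 2: everyone finishes */
        return NULL;
}

int main(void)
{
        pthread_t th[NTHREADS];

        for (long i = 0; i < NTHREADS; i++)
                pthread_create(&th[i], NULL, reload_late, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(th[i], NULL);
        return 0;
}

Build with something like "cc -pthread sketch.c". The point of the second counter is that a thread which finishes early increments late_cpus_out and keeps waiting there, so it can never strand the others on a counter it has already passed.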
diff --git a/queue-4.15/x86-microcode-get-rid-of-struct-apply_microcode_ctx.patch b/queue-4.15/x86-microcode-get-rid-of-struct-apply_microcode_ctx.patch
new file mode 100644 (file)
index 0000000..988e415
--- /dev/null
@@ -0,0 +1,63 @@
+From 854857f5944c59a881ff607b37ed9ed41d031a3b Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 28 Feb 2018 11:28:40 +0100
+Subject: x86/microcode: Get rid of struct apply_microcode_ctx
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 854857f5944c59a881ff607b37ed9ed41d031a3b upstream.
+
+It is a useless remnant from earlier times. Use the ucode_state enum
+directly.
+
+No functional change.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-2-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/core.c |   19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -373,26 +373,23 @@ static int collect_cpu_info(int cpu)
+       return ret;
+ }
+-struct apply_microcode_ctx {
+-      enum ucode_state err;
+-};
+-
+ static void apply_microcode_local(void *arg)
+ {
+-      struct apply_microcode_ctx *ctx = arg;
++      enum ucode_state *err = arg;
+-      ctx->err = microcode_ops->apply_microcode(smp_processor_id());
++      *err = microcode_ops->apply_microcode(smp_processor_id());
+ }
+ static int apply_microcode_on_target(int cpu)
+ {
+-      struct apply_microcode_ctx ctx = { .err = 0 };
++      enum ucode_state err;
+       int ret;
+-      ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
+-      if (!ret)
+-              ret = ctx.err;
+-
++      ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
++      if (!ret) {
++              if (err == UCODE_ERROR)
++                      ret = 1;
++      }
+       return ret;
+ }
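The simplification above boils down to passing a pointer to the enum itself through the void * argument of the cross-CPU call, instead of wrapping it in a single-member struct. A tiny sketch of that shape, with run_on_cpu() as an illustrative stand-in for smp_call_function_single():

#include <stdio.h>

enum ucode_state { UCODE_OK, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

/* Pretend this runs fn on another CPU; returns 0 when the call itself worked. */
static int run_on_cpu(void (*fn)(void *), void *arg)
{
        fn(arg);
        return 0;
}

static void apply_local(void *arg)
{
        enum ucode_state *err = arg;    /* no single-member wrapper struct needed */

        *err = UCODE_UPDATED;
}

static int apply_on_target(void)
{
        enum ucode_state err = UCODE_OK;
        int ret = run_on_cpu(apply_local, &err);

        if (!ret && err == UCODE_ERROR)
                ret = 1;
        return ret;
}

int main(void)
{
        printf("apply_on_target() = %d\n", apply_on_target());
        return 0;
}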
diff --git a/queue-4.15/x86-microcode-intel-check-microcode-revision-before-updating-sibling-threads.patch b/queue-4.15/x86-microcode-intel-check-microcode-revision-before-updating-sibling-threads.patch
new file mode 100644 (file)
index 0000000..42c8d43
--- /dev/null
@@ -0,0 +1,90 @@
+From c182d2b7d0ca48e0d6ff16f7d883161238c447ed Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:41 +0100
+Subject: x86/microcode/intel: Check microcode revision before updating sibling threads
+
+From: Ashok Raj <ashok.raj@intel.com>
+
+commit c182d2b7d0ca48e0d6ff16f7d883161238c447ed upstream.
+
+After updating microcode on one of the threads of a core, the other
+thread sibling automatically gets the update since the microcode
+resources on a hyperthreaded core are shared between the two threads.
+
+Check the microcode revision on the CPU before performing a microcode
+update and thus avoid the WRMSR to MSR 0x79, which is a particularly
+expensive operation.
+
+[ Borislav: Massage changelog and coding style. ]
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: http://lkml.kernel.org/r/1519352533-15992-2-git-send-email-ashok.raj@intel.com
+Link: https://lkml.kernel.org/r/20180228102846.13447-3-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c |   27 ++++++++++++++++++++++++---
+ 1 file changed, 24 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -589,6 +589,17 @@ static int apply_microcode_early(struct
+       if (!mc)
+               return 0;
++      /*
++       * Save us the MSR write below - which is a particular expensive
++       * operation - when the other hyperthread has updated the microcode
++       * already.
++       */
++      rev = intel_get_microcode_revision();
++      if (rev >= mc->hdr.rev) {
++              uci->cpu_sig.rev = rev;
++              return UCODE_OK;
++      }
++
+       /* write microcode via MSR 0x79 */
+       native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+@@ -776,7 +787,7 @@ static enum ucode_state apply_microcode_
+ {
+       struct microcode_intel *mc;
+       struct ucode_cpu_info *uci;
+-      struct cpuinfo_x86 *c;
++      struct cpuinfo_x86 *c = &cpu_data(cpu);
+       static int prev_rev;
+       u32 rev;
+@@ -793,6 +804,18 @@ static enum ucode_state apply_microcode_
+                       return UCODE_NFOUND;
+       }
++      /*
++       * Save us the MSR write below - which is a particular expensive
++       * operation - when the other hyperthread has updated the microcode
++       * already.
++       */
++      rev = intel_get_microcode_revision();
++      if (rev >= mc->hdr.rev) {
++              uci->cpu_sig.rev = rev;
++              c->microcode = rev;
++              return UCODE_OK;
++      }
++
+       /* write microcode via MSR 0x79 */
+       wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+@@ -813,8 +836,6 @@ static enum ucode_state apply_microcode_
+               prev_rev = rev;
+       }
+-      c = &cpu_data(cpu);
+-
+       uci->cpu_sig.rev = rev;
+       c->microcode = rev;
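A short sketch of the early return added above: if the running revision is already at least as new as the staged patch (for example because the hyperthread sibling applied it first), only the cached bookkeeping is refreshed and the expensive write is skipped. read_revision() and write_patch() are fabricated stand-ins for the MSR accesses:

#include <stdio.h>

static unsigned int running_rev = 0x2a;         /* pretend the sibling updated us */

static unsigned int read_revision(void)
{
        return running_rev;                     /* stands in for the revision MSR read */
}

static void write_patch(unsigned int rev)
{
        running_rev = rev;                      /* stands in for the WRMSR */
}

/* Returns 1 if the patch was written, 0 if the CPU was already current. */
static int apply_patch(unsigned int patch_rev, unsigned int *cached_rev)
{
        unsigned int rev = read_revision();

        if (rev >= patch_rev) {
                *cached_rev = rev;              /* keep the bookkeeping in sync */
                return 0;                       /* skip the expensive write */
        }

        write_patch(patch_rev);
        *cached_rev = read_revision();
        return 1;
}

int main(void)
{
        unsigned int cached = 0;

        printf("wrote: %d, cached rev: 0x%x\n", apply_patch(0x2a, &cached), cached);
        return 0;
}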
diff --git a/queue-4.15/x86-microcode-intel-look-into-the-patch-cache-first.patch b/queue-4.15/x86-microcode-intel-look-into-the-patch-cache-first.patch
new file mode 100644 (file)
index 0000000..1866f41
--- /dev/null
@@ -0,0 +1,55 @@
+From d8c3b52c00a05036e0a6b315b4b17921a7b67997 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 28 Feb 2018 11:28:44 +0100
+Subject: x86/microcode/intel: Look into the patch cache first
+
+From: Borislav Petkov <bp@suse.de>
+
+commit d8c3b52c00a05036e0a6b315b4b17921a7b67997 upstream.
+
+The cache might contain a newer patch - look in there first.
+
+A follow-on change will make sure newest patches are loaded into the
+cache of microcode patches.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-6-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -791,9 +791,9 @@ static int collect_cpu_info(int cpu_num,
+ static enum ucode_state apply_microcode_intel(int cpu)
+ {
+-      struct microcode_intel *mc;
+-      struct ucode_cpu_info *uci;
++      struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
++      struct microcode_intel *mc;
+       static int prev_rev;
+       u32 rev;
+@@ -801,11 +801,10 @@ static enum ucode_state apply_microcode_
+       if (WARN_ON(raw_smp_processor_id() != cpu))
+               return UCODE_ERROR;
+-      uci = ucode_cpu_info + cpu;
+-      mc = uci->mc;
++      /* Look for a newer patch in our cache: */
++      mc = find_patch(uci);
+       if (!mc) {
+-              /* Look for a newer patch in our cache: */
+-              mc = find_patch(uci);
++              mc = uci->mc;
+               if (!mc)
+                       return UCODE_NFOUND;
+       }
diff --git a/queue-4.15/x86-microcode-intel-writeback-and-invalidate-caches-before-updating-microcode.patch b/queue-4.15/x86-microcode-intel-writeback-and-invalidate-caches-before-updating-microcode.patch
new file mode 100644 (file)
index 0000000..6c206a9
--- /dev/null
@@ -0,0 +1,58 @@
+From 91df9fdf51492aec9fed6b4cbd33160886740f47 Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:42 +0100
+Subject: x86/microcode/intel: Writeback and invalidate caches before updating microcode
+
+From: Ashok Raj <ashok.raj@intel.com>
+
+commit 91df9fdf51492aec9fed6b4cbd33160886740f47 upstream.
+
+Updating microcode is less error prone when caches have been flushed
+beforehand, depending on what exactly the microcode is updating. For
+example, some of the issues around certain Broadwell parts can be
+addressed by doing a full cache flush.
+
+[ Borislav: Massage it and use native_wbinvd() in both cases. ]
+
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: http://lkml.kernel.org/r/1519352533-15992-3-git-send-email-ashok.raj@intel.com
+Link: https://lkml.kernel.org/r/20180228102846.13447-4-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -600,6 +600,12 @@ static int apply_microcode_early(struct
+               return UCODE_OK;
+       }
++      /*
++       * Writeback and invalidate caches before updating microcode to avoid
++       * internal issues depending on what the microcode is updating.
++       */
++      native_wbinvd();
++
+       /* write microcode via MSR 0x79 */
+       native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+@@ -816,6 +822,12 @@ static enum ucode_state apply_microcode_
+               return UCODE_OK;
+       }
++      /*
++       * Writeback and invalidate caches before updating microcode to avoid
++       * internal issues depending on what the microcode is updating.
++       */
++      native_wbinvd();
++
+       /* write microcode via MSR 0x79 */
+       wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
diff --git a/queue-4.15/x86-microcode-propagate-return-value-from-updating-functions.patch b/queue-4.15/x86-microcode-propagate-return-value-from-updating-functions.patch
new file mode 100644 (file)
index 0000000..271f89d
--- /dev/null
@@ -0,0 +1,229 @@
+From 3f1f576a195aa266813cbd4ca70291deb61e0129 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 16 Feb 2018 12:26:38 +0100
+Subject: x86/microcode: Propagate return value from updating functions
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 3f1f576a195aa266813cbd4ca70291deb61e0129 upstream.
+
+... so that callers can know when microcode was updated and act
+accordingly.
+
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180216112640.11554-2-bp@alien8.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/microcode.h      |    9 +++++++--
+ arch/x86/kernel/cpu/microcode/amd.c   |   10 +++++-----
+ arch/x86/kernel/cpu/microcode/core.c  |   33 +++++++++++++++++----------------
+ arch/x86/kernel/cpu/microcode/intel.c |   10 +++++-----
+ 4 files changed, 34 insertions(+), 28 deletions(-)
+
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -37,7 +37,12 @@ struct cpu_signature {
+ struct device;
+-enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
++enum ucode_state {
++      UCODE_OK        = 0,
++      UCODE_UPDATED,
++      UCODE_NFOUND,
++      UCODE_ERROR,
++};
+ struct microcode_ops {
+       enum ucode_state (*request_microcode_user) (int cpu,
+@@ -54,7 +59,7 @@ struct microcode_ops {
+        * are being called.
+        * See also the "Synchronization" section in microcode_core.c.
+        */
+-      int (*apply_microcode) (int cpu);
++      enum ucode_state (*apply_microcode) (int cpu);
+       int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
+ };
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -498,7 +498,7 @@ static unsigned int verify_patch_size(u8
+       return patch_size;
+ }
+-static int apply_microcode_amd(int cpu)
++static enum ucode_state apply_microcode_amd(int cpu)
+ {
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+       struct microcode_amd *mc_amd;
+@@ -512,7 +512,7 @@ static int apply_microcode_amd(int cpu)
+       p = find_patch(cpu);
+       if (!p)
+-              return 0;
++              return UCODE_NFOUND;
+       mc_amd  = p->data;
+       uci->mc = p->data;
+@@ -523,13 +523,13 @@ static int apply_microcode_amd(int cpu)
+       if (rev >= mc_amd->hdr.patch_id) {
+               c->microcode = rev;
+               uci->cpu_sig.rev = rev;
+-              return 0;
++              return UCODE_OK;
+       }
+       if (__apply_microcode_amd(mc_amd)) {
+               pr_err("CPU%d: update failed for patch_level=0x%08x\n",
+                       cpu, mc_amd->hdr.patch_id);
+-              return -1;
++              return UCODE_ERROR;
+       }
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+               mc_amd->hdr.patch_id);
+@@ -537,7 +537,7 @@ static int apply_microcode_amd(int cpu)
+       uci->cpu_sig.rev = mc_amd->hdr.patch_id;
+       c->microcode = mc_amd->hdr.patch_id;
+-      return 0;
++      return UCODE_UPDATED;
+ }
+ static int install_equiv_cpu_table(const u8 *buf)
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -374,7 +374,7 @@ static int collect_cpu_info(int cpu)
+ }
+ struct apply_microcode_ctx {
+-      int err;
++      enum ucode_state err;
+ };
+ static void apply_microcode_local(void *arg)
+@@ -489,31 +489,29 @@ static void __exit microcode_dev_exit(vo
+ /* fake device for request_firmware */
+ static struct platform_device *microcode_pdev;
+-static int reload_for_cpu(int cpu)
++static enum ucode_state reload_for_cpu(int cpu)
+ {
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+       enum ucode_state ustate;
+-      int err = 0;
+       if (!uci->valid)
+-              return err;
++              return UCODE_OK;
+       ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
+-      if (ustate == UCODE_OK)
+-              apply_microcode_on_target(cpu);
+-      else
+-              if (ustate == UCODE_ERROR)
+-                      err = -EINVAL;
+-      return err;
++      if (ustate != UCODE_OK)
++              return ustate;
++
++      return apply_microcode_on_target(cpu);
+ }
+ static ssize_t reload_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t size)
+ {
++      enum ucode_state tmp_ret = UCODE_OK;
+       unsigned long val;
++      ssize_t ret = 0;
+       int cpu;
+-      ssize_t ret = 0, tmp_ret;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+@@ -526,15 +524,18 @@ static ssize_t reload_store(struct devic
+       mutex_lock(&microcode_mutex);
+       for_each_online_cpu(cpu) {
+               tmp_ret = reload_for_cpu(cpu);
+-              if (tmp_ret != 0)
++              if (tmp_ret > UCODE_NFOUND) {
+                       pr_warn("Error reloading microcode on CPU %d\n", cpu);
+-              /* save retval of the first encountered reload error */
+-              if (!ret)
+-                      ret = tmp_ret;
++                      /* set retval for the first encountered reload error */
++                      if (!ret)
++                              ret = -EINVAL;
++              }
+       }
+-      if (!ret)
++
++      if (!ret && tmp_ret == UCODE_UPDATED)
+               perf_check_microcode();
++
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -772,7 +772,7 @@ static int collect_cpu_info(int cpu_num,
+       return 0;
+ }
+-static int apply_microcode_intel(int cpu)
++static enum ucode_state apply_microcode_intel(int cpu)
+ {
+       struct microcode_intel *mc;
+       struct ucode_cpu_info *uci;
+@@ -782,7 +782,7 @@ static int apply_microcode_intel(int cpu
+       /* We should bind the task to the CPU */
+       if (WARN_ON(raw_smp_processor_id() != cpu))
+-              return -1;
++              return UCODE_ERROR;
+       uci = ucode_cpu_info + cpu;
+       mc = uci->mc;
+@@ -790,7 +790,7 @@ static int apply_microcode_intel(int cpu
+               /* Look for a newer patch in our cache: */
+               mc = find_patch(uci);
+               if (!mc)
+-                      return 0;
++                      return UCODE_NFOUND;
+       }
+       /* write microcode via MSR 0x79 */
+@@ -801,7 +801,7 @@ static int apply_microcode_intel(int cpu
+       if (rev != mc->hdr.rev) {
+               pr_err("CPU%d update to revision 0x%x failed\n",
+                      cpu, mc->hdr.rev);
+-              return -1;
++              return UCODE_ERROR;
+       }
+       if (rev != prev_rev) {
+@@ -818,7 +818,7 @@ static int apply_microcode_intel(int cpu
+       uci->cpu_sig.rev = rev;
+       c->microcode = rev;
+-      return 0;
++      return UCODE_UPDATED;
+ }
+ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
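The interface change above is easiest to see on the ops structure: the apply hook now returns the ucode_state enum instead of an overloaded 0/-1 int, so generic code can tell "updated" apart from "nothing to do". A minimal sketch of such a vtable; microcode_ops_sketch and vendor_apply() are illustrative names, not the kernel's:

#include <stdio.h>

enum ucode_state { UCODE_OK, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

struct microcode_ops_sketch {
        enum ucode_state (*apply_microcode)(int cpu);
};

static enum ucode_state vendor_apply(int cpu)
{
        return cpu ? UCODE_OK : UCODE_UPDATED;  /* fabricated outcomes */
}

static const struct microcode_ops_sketch ops = {
        .apply_microcode = vendor_apply,
};

int main(void)
{
        for (int cpu = 0; cpu < 2; cpu++) {
                enum ucode_state s = ops.apply_microcode(cpu);

                printf("cpu %d -> %s\n", cpu,
                       s == UCODE_UPDATED ? "updated" : "already current");
        }
        return 0;
}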
diff --git a/queue-4.15/x86-microcode-request-microcode-on-the-bsp.patch b/queue-4.15/x86-microcode-request-microcode-on-the-bsp.patch
new file mode 100644 (file)
index 0000000..d4a1ceb
--- /dev/null
@@ -0,0 +1,68 @@
+From cfb52a5a09c8ae3a1dafb44ce549fde5b69e8117 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 28 Feb 2018 11:28:45 +0100
+Subject: x86/microcode: Request microcode on the BSP
+
+From: Borislav Petkov <bp@suse.de>
+
+commit cfb52a5a09c8ae3a1dafb44ce549fde5b69e8117 upstream.
+
+... so that any newer version can land in the cache and can later be
+fished out by the application functions. Do that before grabbing the
+hotplug lock.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-7-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/core.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -499,15 +499,10 @@ static int check_online_cpus(void)
+ static enum ucode_state reload_for_cpu(int cpu)
+ {
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+-      enum ucode_state ustate;
+       if (!uci->valid)
+               return UCODE_OK;
+-      ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
+-      if (ustate != UCODE_OK)
+-              return ustate;
+-
+       return apply_microcode_on_target(cpu);
+ }
+@@ -515,11 +510,11 @@ static ssize_t reload_store(struct devic
+                           struct device_attribute *attr,
+                           const char *buf, size_t size)
+ {
++      int cpu, bsp = boot_cpu_data.cpu_index;
+       enum ucode_state tmp_ret = UCODE_OK;
+       bool do_callback = false;
+       unsigned long val;
+       ssize_t ret = 0;
+-      int cpu;
+       ret = kstrtoul(buf, 0, &val);
+       if (ret)
+@@ -528,6 +523,10 @@ static ssize_t reload_store(struct devic
+       if (val != 1)
+               return size;
++      tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
++      if (tmp_ret != UCODE_OK)
++              return size;
++
+       get_online_cpus();
+       ret = check_online_cpus();
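The flow change above amounts to doing the expensive firmware request once, on the boot CPU and before any locks are taken, and letting the per-CPU apply step consume only the cache. A rough sketch of that shape, where fetch_image() and cached_rev are illustrative stand-ins for request_microcode_fw() and the kernel's patch cache:

#include <stdbool.h>
#include <stdio.h>

static unsigned int cached_rev;         /* stands in for the kernel's patch cache */

/* One-time fetch/parse on the "BSP"; returns true when something new is cached. */
static bool fetch_image(void)
{
        cached_rev = 0x2b;              /* pretend the blob carried a new revision */
        return true;
}

static void apply_from_cache(int cpu)
{
        printf("cpu %d: applying rev 0x%x from the cache\n", cpu, cached_rev);
}

int main(void)
{
        if (!fetch_image())             /* do the expensive request exactly once */
                return 0;               /* nothing new: skip the whole dance */

        /* The real code takes the hotplug lock and does the rendezvous here. */
        for (int cpu = 0; cpu < 4; cpu++)
                apply_from_cache(cpu);
        return 0;
}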
diff --git a/queue-4.15/x86-microcode-synchronize-late-microcode-loading.patch b/queue-4.15/x86-microcode-synchronize-late-microcode-loading.patch
new file mode 100644 (file)
index 0000000..37624c1
--- /dev/null
@@ -0,0 +1,214 @@
+From a5321aec6412b20b5ad15db2d6b916c05349dbff Mon Sep 17 00:00:00 2001
+From: Ashok Raj <ashok.raj@intel.com>
+Date: Wed, 28 Feb 2018 11:28:46 +0100
+Subject: x86/microcode: Synchronize late microcode loading
+
+From: Ashok Raj <ashok.raj@intel.com>
+
+commit a5321aec6412b20b5ad15db2d6b916c05349dbff upstream.
+
+Original idea by Ashok, completely rewritten by Borislav.
+
+Before you read any further: the early loading method is still the
+preferred one and you should always do that. The following patch improves
+the late loading mechanism for long-running jobs and cloud use cases.
+
+Gather all cores and serialize the microcode update on them by doing it
+one-by-one to make the late update process as reliable as possible and
+avoid potential issues caused by the microcode update.
+
+[ Borislav: Rewrite completely. ]
+
+Co-developed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Ashok Raj <ashok.raj@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
+Tested-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Arjan Van De Ven <arjan.van.de.ven@intel.com>
+Link: https://lkml.kernel.org/r/20180228102846.13447-8-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/core.c |  118 +++++++++++++++++++++++++++--------
+ 1 file changed, 92 insertions(+), 26 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/core.c
++++ b/arch/x86/kernel/cpu/microcode/core.c
+@@ -22,13 +22,16 @@
+ #define pr_fmt(fmt) "microcode: " fmt
+ #include <linux/platform_device.h>
++#include <linux/stop_machine.h>
+ #include <linux/syscore_ops.h>
+ #include <linux/miscdevice.h>
+ #include <linux/capability.h>
+ #include <linux/firmware.h>
+ #include <linux/kernel.h>
++#include <linux/delay.h>
+ #include <linux/mutex.h>
+ #include <linux/cpu.h>
++#include <linux/nmi.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
+@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
+  */
+ static DEFINE_MUTEX(microcode_mutex);
++/*
++ * Serialize late loading so that CPUs get updated one-by-one.
++ */
++static DEFINE_SPINLOCK(update_lock);
++
+ struct ucode_cpu_info         ucode_cpu_info[NR_CPUS];
+ struct cpu_info_ctx {
+@@ -486,6 +494,19 @@ static void __exit microcode_dev_exit(vo
+ /* fake device for request_firmware */
+ static struct platform_device *microcode_pdev;
++/*
++ * Late loading dance. Why the heavy-handed stomp_machine effort?
++ *
++ * - HT siblings must be idle and not execute other code while the other sibling
++ *   is loading microcode in order to avoid any negative interactions caused by
++ *   the loading.
++ *
++ * - In addition, microcode update on the cores must be serialized until this
++ *   requirement can be relaxed in the future. Right now, this is conservative
++ *   and good.
++ */
++#define SPINUNIT 100 /* 100 nsec */
++
+ static int check_online_cpus(void)
+ {
+       if (num_online_cpus() == num_present_cpus())
+@@ -496,23 +517,85 @@ static int check_online_cpus(void)
+       return -EINVAL;
+ }
+-static enum ucode_state reload_for_cpu(int cpu)
++static atomic_t late_cpus;
++
++/*
++ * Returns:
++ * < 0 - on error
++ *   0 - no update done
++ *   1 - microcode was updated
++ */
++static int __reload_late(void *info)
+ {
+-      struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
++      unsigned int timeout = NSEC_PER_SEC;
++      int all_cpus = num_online_cpus();
++      int cpu = smp_processor_id();
++      enum ucode_state err;
++      int ret = 0;
++
++      atomic_dec(&late_cpus);
++
++      /*
++       * Wait for all CPUs to arrive. A load will not be attempted unless all
++       * CPUs show up.
++       * */
++      while (atomic_read(&late_cpus)) {
++              if (timeout < SPINUNIT) {
++                      pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
++                              atomic_read(&late_cpus));
++                      return -1;
++              }
++
++              ndelay(SPINUNIT);
++              timeout -= SPINUNIT;
++
++              touch_nmi_watchdog();
++      }
++
++      spin_lock(&update_lock);
++      apply_microcode_local(&err);
++      spin_unlock(&update_lock);
++
++      if (err > UCODE_NFOUND) {
++              pr_warn("Error reloading microcode on CPU %d\n", cpu);
++              ret = -1;
++      } else if (err == UCODE_UPDATED) {
++              ret = 1;
++      }
+-      if (!uci->valid)
+-              return UCODE_OK;
++      atomic_inc(&late_cpus);
+-      return apply_microcode_on_target(cpu);
++      while (atomic_read(&late_cpus) != all_cpus)
++              cpu_relax();
++
++      return ret;
++}
++
++/*
++ * Reload microcode late on all CPUs. Wait for a sec until they
++ * all gather together.
++ */
++static int microcode_reload_late(void)
++{
++      int ret;
++
++      atomic_set(&late_cpus, num_online_cpus());
++
++      ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
++      if (ret < 0)
++              return ret;
++      else if (ret > 0)
++              microcode_check();
++
++      return ret;
+ }
+ static ssize_t reload_store(struct device *dev,
+                           struct device_attribute *attr,
+                           const char *buf, size_t size)
+ {
+-      int cpu, bsp = boot_cpu_data.cpu_index;
+       enum ucode_state tmp_ret = UCODE_OK;
+-      bool do_callback = false;
++      int bsp = boot_cpu_data.cpu_index;
+       unsigned long val;
+       ssize_t ret = 0;
+@@ -534,30 +617,13 @@ static ssize_t reload_store(struct devic
+               goto put;
+       mutex_lock(&microcode_mutex);
+-
+-      for_each_online_cpu(cpu) {
+-              tmp_ret = reload_for_cpu(cpu);
+-              if (tmp_ret > UCODE_NFOUND) {
+-                      pr_warn("Error reloading microcode on CPU %d\n", cpu);
+-
+-                      /* set retval for the first encountered reload error */
+-                      if (!ret)
+-                              ret = -EINVAL;
+-              }
+-
+-              if (tmp_ret == UCODE_UPDATED)
+-                      do_callback = true;
+-      }
+-
+-      if (!ret && do_callback)
+-              microcode_check();
+-
++      ret = microcode_reload_late();
+       mutex_unlock(&microcode_mutex);
+ put:
+       put_online_cpus();
+-      if (!ret)
++      if (ret >= 0)
+               ret = size;
+       return ret;