git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 24 Jan 2018 09:25:19 +0000 (10:25 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 24 Jan 2018 09:25:19 +0000 (10:25 +0100)
added patches:
x86-cpu-intel-introduce-macros-for-intel-family-numbers.patch
x86-microcode-intel-fix-bdw-late-loading-revision-check.patch
x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch

queue-4.4/series
queue-4.4/x86-cpu-intel-introduce-macros-for-intel-family-numbers.patch [new file with mode: 0644]
queue-4.4/x86-microcode-intel-fix-bdw-late-loading-revision-check.patch [new file with mode: 0644]
queue-4.4/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch [new file with mode: 0644]

diff --git a/queue-4.4/series b/queue-4.4/series
index 86a0b29f1fe1769e93b0f936facc574c3a59a565..3b09eef29f3c1e74552b8e5c9d42b20509e578e0 100644
@@ -2,3 +2,6 @@ x86-asm-32-make-sync_core-handle-missing-cpuid-on-all-32-bit-kernels.patch
 usbip-prevent-vhci_hcd-driver-from-leaking-a-socket-pointer-address.patch
 usbip-fix-implicit-fallthrough-warning.patch
 usbip-fix-potential-format-overflow-in-userspace-tools.patch
+x86-microcode-intel-fix-bdw-late-loading-revision-check.patch
+x86-cpu-intel-introduce-macros-for-intel-family-numbers.patch
+x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
diff --git a/queue-4.4/x86-cpu-intel-introduce-macros-for-intel-family-numbers.patch b/queue-4.4/x86-cpu-intel-introduce-macros-for-intel-family-numbers.patch
new file mode 100644
index 0000000..30fe720
--- /dev/null
@@ -0,0 +1,145 @@
+From 970442c599b22ccd644ebfe94d1d303bf6f87c05 Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave@sr71.net>
+Date: Thu, 2 Jun 2016 17:19:27 -0700
+Subject: x86/cpu/intel: Introduce macros for Intel family numbers
+
+From: Dave Hansen <dave@sr71.net>
+
+commit 970442c599b22ccd644ebfe94d1d303bf6f87c05 upstream.
+
+Problem:
+
+We have a boatload of open-coded family-6 model numbers.  Half of
+them have these model numbers in hex and the other half in
+decimal.  This makes grepping for them tons of fun, if you were
+to try.
+
+Solution:
+
+Consolidate all the magic numbers.  Put all the definitions in
+one header.
+
+The names here are closely derived from the comments describing
+the models from arch/x86/events/intel/core.c.  We could easily
+make them shorter by doing things like s/SANDYBRIDGE/SNB/, but
+they seemed fine even with the longer versions to me.
+
+Do not take any of these names too literally, like "DESKTOP"
+or "MOBILE".  These are all colloquial names and not precise
+descriptions of everywhere a given model will show up.
+
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Darren Hart <dvhart@infradead.org>
+Cc: Dave Hansen <dave@sr71.net>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: Doug Thompson <dougthompson@xmission.com>
+Cc: Eduardo Valentin <edubezval@gmail.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
+Cc: Kan Liang <kan.liang@intel.com>
+Cc: Len Brown <lenb@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+Cc: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
+Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Ulf Hansson <ulf.hansson@linaro.org>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+Cc: Zhang Rui <rui.zhang@intel.com>
+Cc: jacob.jun.pan@intel.com
+Cc: linux-acpi@vger.kernel.org
+Cc: linux-edac@vger.kernel.org
+Cc: linux-mmc@vger.kernel.org
+Cc: linux-pm@vger.kernel.org
+Cc: platform-driver-x86@vger.kernel.org
+Link: http://lkml.kernel.org/r/20160603001927.F2A7D828@viggo.jf.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/intel-family.h |   68 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 68 insertions(+)
+
+--- /dev/null
++++ b/arch/x86/include/asm/intel-family.h
+@@ -0,0 +1,68 @@
++#ifndef _ASM_X86_INTEL_FAMILY_H
++#define _ASM_X86_INTEL_FAMILY_H
++
++/*
++ * "Big Core" Processors (Branded as Core, Xeon, etc...)
++ *
++ * The "_X" parts are generally the EP and EX Xeons, or the
++ * "Extreme" ones, like Broadwell-E.
++ *
++ * Things ending in "2" are usually because we have no better
++ * name for them.  There's no processor called "WESTMERE2".
++ */
++
++#define INTEL_FAM6_CORE_YONAH          0x0E
++#define INTEL_FAM6_CORE2_MEROM         0x0F
++#define INTEL_FAM6_CORE2_MEROM_L       0x16
++#define INTEL_FAM6_CORE2_PENRYN        0x17
++#define INTEL_FAM6_CORE2_DUNNINGTON    0x1D
++
++#define INTEL_FAM6_NEHALEM             0x1E
++#define INTEL_FAM6_NEHALEM_EP          0x1A
++#define INTEL_FAM6_NEHALEM_EX          0x2E
++#define INTEL_FAM6_WESTMERE            0x25
++#define INTEL_FAM6_WESTMERE2           0x1F
++#define INTEL_FAM6_WESTMERE_EP         0x2C
++#define INTEL_FAM6_WESTMERE_EX         0x2F
++
++#define INTEL_FAM6_SANDYBRIDGE         0x2A
++#define INTEL_FAM6_SANDYBRIDGE_X       0x2D
++#define INTEL_FAM6_IVYBRIDGE           0x3A
++#define INTEL_FAM6_IVYBRIDGE_X         0x3E
++
++#define INTEL_FAM6_HASWELL_CORE        0x3C
++#define INTEL_FAM6_HASWELL_X           0x3F
++#define INTEL_FAM6_HASWELL_ULT         0x45
++#define INTEL_FAM6_HASWELL_GT3E        0x46
++
++#define INTEL_FAM6_BROADWELL_CORE      0x3D
++#define INTEL_FAM6_BROADWELL_XEON_D    0x56
++#define INTEL_FAM6_BROADWELL_GT3E      0x47
++#define INTEL_FAM6_BROADWELL_X         0x4F
++
++#define INTEL_FAM6_SKYLAKE_MOBILE      0x4E
++#define INTEL_FAM6_SKYLAKE_DESKTOP     0x5E
++#define INTEL_FAM6_SKYLAKE_X           0x55
++#define INTEL_FAM6_KABYLAKE_MOBILE     0x8E
++#define INTEL_FAM6_KABYLAKE_DESKTOP    0x9E
++
++/* "Small Core" Processors (Atom) */
++
++#define INTEL_FAM6_ATOM_PINEVIEW       0x1C
++#define INTEL_FAM6_ATOM_LINCROFT       0x26
++#define INTEL_FAM6_ATOM_PENWELL        0x27
++#define INTEL_FAM6_ATOM_CLOVERVIEW     0x35
++#define INTEL_FAM6_ATOM_CEDARVIEW      0x36
++#define INTEL_FAM6_ATOM_SILVERMONT1    0x37 /* BayTrail/BYT / Valleyview */
++#define INTEL_FAM6_ATOM_SILVERMONT2    0x4D /* Avaton/Rangely */
++#define INTEL_FAM6_ATOM_AIRMONT        0x4C /* CherryTrail / Braswell */
++#define INTEL_FAM6_ATOM_MERRIFIELD1    0x4A /* Tangier */
++#define INTEL_FAM6_ATOM_MERRIFIELD2    0x5A /* Annidale */
++#define INTEL_FAM6_ATOM_GOLDMONT       0x5C
++#define INTEL_FAM6_ATOM_DENVERTON      0x5F /* Goldmont Microserver */
++
++/* Xeon Phi */
++
++#define INTEL_FAM6_XEON_PHI_KNL        0x57 /* Knights Landing */
++
++#endif /* _ASM_X86_INTEL_FAMILY_H */
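[ note: the intended usage pattern shows up in the retpoline patch
  below, where bugs.c matches boot_cpu_data.x86_model against these
  macros instead of raw numbers.  A minimal sketch of that pattern,
  hypothetical example code rather than part of any patch here: ]

#include <asm/intel-family.h>
#include <asm/processor.h>

/* Gate a quirk on Broadwell desktop parts.  Before this header the
 * model would have been open-coded as 0x3D (or 61 decimal), which is
 * exactly what made grepping for users of a given model painful. */
static bool is_broadwell_core(void)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	       boot_cpu_data.x86 == 6 &&
	       boot_cpu_data.x86_model == INTEL_FAM6_BROADWELL_CORE;
}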
diff --git a/queue-4.4/x86-microcode-intel-fix-bdw-late-loading-revision-check.patch b/queue-4.4/x86-microcode-intel-fix-bdw-late-loading-revision-check.patch
new file mode 100644
index 0000000..973197c
--- /dev/null
@@ -0,0 +1,31 @@
+From ben.hutchings@codethink.co.uk  Wed Jan 24 10:19:07 2018
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Date: Wed, 24 Jan 2018 02:31:19 +0000
+Subject: x86/microcode/intel: Fix BDW late-loading revision check
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org
+Message-ID: <20180124023119.kaendz4jiuejowxr@xylophone.i.decadent.org.uk>
+Content-Disposition: inline
+
+From: Ben Hutchings <ben.hutchings@codethink.co.uk>
+
+The backport of commit b94b73733171 ("x86/microcode/intel: Extend BDW
+late-loading with a revision check") to 4.4-stable deleted a "return true"
+statement.  This bug is not present upstream or in other stable branches.
+
+Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/intel.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -1005,6 +1005,7 @@ static bool is_blacklisted(unsigned int
+           c->microcode < 0x0b000021) {
+               pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+               pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
++              return true;
+       }
+       return false;
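[ note: for context, the function this one-liner lands in looks
  roughly as follows after the fix.  This is a sketch reconstructed
  from the hunk above plus the upstream commit it backports; the full
  4.4 condition includes additional stepping and cache-size checks
  elided here: ]

static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on Broadwell-X (model 0x4F) with a microcode
	 * revision below 0x0b000021 can hang the machine (erratum
	 * BDF90).  Without the restored "return true" the function
	 * printed both warnings and then fell through to
	 * "return false", so the unsafe late load proceeded anyway.
	 */
	if (c->x86 == 6 && c->x86_model == 0x4F /* Broadwell-X */ &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}
	return false;
}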
diff --git a/queue-4.4/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch b/queue-4.4/x86-retpoline-fill-rsb-on-context-switch-for-affected-cpus.patch
new file mode 100644
index 0000000..8c6c0ef
--- /dev/null
@@ -0,0 +1,207 @@
+From c995efd5a740d9cbafbf58bde4973e8b50b4d761 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Fri, 12 Jan 2018 17:49:25 +0000
+Subject: x86/retpoline: Fill RSB on context switch for affected CPUs
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit c995efd5a740d9cbafbf58bde4973e8b50b4d761 upstream.
+
+On context switch from a shallow call stack to a deeper one, as the CPU
+does 'ret' up the deeper side it may encounter RSB entries (predictions for
+where the 'ret' goes to) which were populated in userspace.
+
+This is problematic if neither SMEP nor KPTI (the latter of which marks
+userspace pages as NX for the kernel) are active, as malicious code in
+userspace may then be executed speculatively.
+
+Overwrite the CPU's return prediction stack with calls which are predicted
+to return to an infinite loop, to "capture" speculation if this
+happens. This is required both for retpoline, and also in conjunction with
+IBRS for !SMEP && !KPTI.
+
+On Skylake+ the problem is slightly different, and an *underflow* of the
+RSB may cause errant branch predictions to occur. So there it's not so much
+overwrite, as *filling* the RSB to attempt to prevent it getting
+empty. This is only a partial solution for Skylake+ since there are many
+other conditions which may result in the RSB becoming empty. The full
+solution on Skylake+ is to use IBRS, which will prevent the problem even
+when the RSB becomes empty. With IBRS, the RSB-stuffing will not be
+required on context switch.
+
+[ tglx: Added missing vendor check and slightly massaged comments and
+       changelog ]
+
+[js] backport to 4.4 -- __switch_to_asm does not exist there, we
+     have to patch the switch_to macros for both x86_32 and x86_64.
+
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: gnomes@lxorguk.ukuu.org.uk
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: thomas.lendacky@amd.com
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Kees Cook <keescook@google.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/1515779365-9032-1-git-send-email-dwmw@amazon.co.uk
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeature.h |    1 +
+ arch/x86/include/asm/switch_to.h  |   38 ++++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/bugs.c        |   36 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 75 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -199,6 +199,7 @@
+ #define X86_FEATURE_HWP_EPP   ( 7*32+13) /* Intel HWP_EPP */
+ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
+ #define X86_FEATURE_INTEL_PT  ( 7*32+15) /* Intel Processor Trace */
++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
+ #define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
+--- a/arch/x86/include/asm/switch_to.h
++++ b/arch/x86/include/asm/switch_to.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_SWITCH_TO_H
+ #define _ASM_X86_SWITCH_TO_H
++#include <asm/nospec-branch.h>
++
+ struct task_struct; /* one of the stranger aspects of C forward declarations */
+ __visible struct task_struct *__switch_to(struct task_struct *prev,
+                                          struct task_struct *next);
+@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct
+ #define __switch_canary_iparam
+ #endif        /* CC_STACKPROTECTOR */
++#ifdef CONFIG_RETPOLINE
++      /*
++       * When switching from a shallower to a deeper call stack
++       * the RSB may either underflow or use entries populated
++       * with userspace addresses. On CPUs where those concerns
++       * exist, overwrite the RSB with entries which capture
++       * speculative execution to prevent attack.
++       */
++#define __retpoline_fill_return_buffer                                        \
++      ALTERNATIVE("jmp 910f",                                         \
++              __stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
++              X86_FEATURE_RSB_CTXSW)                                  \
++      "910:\n\t"
++#else
++#define __retpoline_fill_return_buffer
++#endif
++
+ /*
+  * Saving eflags is important. It switches not only IOPL between tasks,
+  * it also protects other tasks from NT leaking through sysenter etc.
+@@ -46,6 +65,7 @@ do {                                                                 \
+                    "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
+                    "pushl %[next_ip]\n\t"     /* restore EIP   */     \
+                    __switch_canary                                    \
++                   __retpoline_fill_return_buffer                     \
+                    "jmp __switch_to\n"        /* regparm call  */     \
+                    "1:\t"                                             \
+                    "popl %%ebp\n\t"           /* restore EBP   */     \
+@@ -100,6 +120,23 @@ do {                                                                      \
+ #define __switch_canary_iparam
+ #endif        /* CC_STACKPROTECTOR */
++#ifdef CONFIG_RETPOLINE
++      /*
++       * When switching from a shallower to a deeper call stack
++       * the RSB may either underflow or use entries populated
++       * with userspace addresses. On CPUs where those concerns
++       * exist, overwrite the RSB with entries which capture
++       * speculative execution to prevent attack.
++       */
++#define __retpoline_fill_return_buffer                                        \
++      ALTERNATIVE("jmp 910f",                                         \
++              __stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
++              X86_FEATURE_RSB_CTXSW)                                  \
++      "910:\n\t"
++#else
++#define __retpoline_fill_return_buffer
++#endif
++
+ /*
+  * There is no need to save or restore flags, because flags are always
+  * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
+@@ -112,6 +149,7 @@ do {                                                                       \
+            "call __switch_to\n\t"                                       \
+            "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
+            __switch_canary                                              \
++           __retpoline_fill_return_buffer                               \
+            "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
+            "movq %%rax,%%rdi\n\t"                                       \
+            "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"                 \
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -22,6 +22,7 @@
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
++#include <asm/intel-family.h>
+ static void __init spectre_v2_select_mitigation(void);
+@@ -154,6 +155,23 @@ disable:
+       return SPECTRE_V2_CMD_NONE;
+ }
++/* Check for Skylake-like CPUs (for RSB handling) */
++static bool __init is_skylake_era(void)
++{
++      if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
++          boot_cpu_data.x86 == 6) {
++              switch (boot_cpu_data.x86_model) {
++              case INTEL_FAM6_SKYLAKE_MOBILE:
++              case INTEL_FAM6_SKYLAKE_DESKTOP:
++              case INTEL_FAM6_SKYLAKE_X:
++              case INTEL_FAM6_KABYLAKE_MOBILE:
++              case INTEL_FAM6_KABYLAKE_DESKTOP:
++                      return true;
++              }
++      }
++      return false;
++}
++
+ static void __init spectre_v2_select_mitigation(void)
+ {
+       enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
+@@ -212,6 +230,24 @@ retpoline_auto:
+       spectre_v2_enabled = mode;
+       pr_info("%s\n", spectre_v2_strings[mode]);
++
++      /*
++       * If neither SMEP nor KPTI is available, there is a risk of
++       * hitting userspace addresses in the RSB after a context switch
++       * from a shallow call stack to a deeper one. To prevent this fill
++       * the entire RSB, even when using IBRS.
++       *
++       * Skylake era CPUs have a separate issue with *underflow* of the
++       * RSB, when they will predict 'ret' targets from the generic BTB.
++       * The proper mitigation for this is IBRS. If IBRS is not supported
++       * or deactivated in favour of retpolines the RSB fill on context
++       * switch is required.
++       */
++      if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
++           !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
++              setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
++              pr_info("Filling RSB on context switch\n");
++      }
+ }
+ #undef pr_fmt
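[ note: how the pieces fit together.  spectre_v2_select_mitigation()
  force-sets X86_FEATURE_RSB_CTXSW on affected CPUs, and at boot
  apply_alternatives() patches the "jmp 910f" skip in the switch_to
  macros into the __FILL_RETURN_BUFFER expansion.  That expansion is
  a call loop whose pushed return addresses all point at a
  speculation trap.  The sketch below is illustrative only; the real
  macro in <asm/nospec-branch.h> unrolls two calls per iteration and
  goes through ALTERNATIVE so unaffected CPUs pay nothing: ]

/* Illustrative x86-64 sketch of RSB stuffing, not kernel code. */
static __always_inline void fill_rsb_sketch(void)
{
	unsigned long loops = 32;	/* like RSB_CLEAR_LOOPS */

	asm volatile("1:\n\t"
		     "call 2f\n\t"	/* push an RSB entry aimed at 3: */
		     "3:\n\t"
		     "pause\n\t"	/* speculation trap: only reached */
		     "lfence\n\t"	/* by a mispredicted 'ret'        */
		     "jmp 3b\n\t"
		     "2:\n\t"
		     "dec %0\n\t"
		     "jnz 1b\n\t"
		     "add $(32*8), %%rsp"	/* drop the 32 return addresses */
		     : "+r" (loops)
		     :
		     : "memory");
}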