]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 9 Jun 2020 17:16:26 +0000 (19:16 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 9 Jun 2020 17:16:26 +0000 (19:16 +0200)
added patches:
x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch
x86-cpu-add-table-argument-to-cpu_matches.patch
x86-speculation-add-ivy-bridge-to-affected-list.patch
x86-speculation-add-special-register-buffer-data-sampling-srbds-mitigation.patch
x86-speculation-add-srbds-vulnerability-and-mitigation-documentation.patch

queue-4.19/series
queue-4.19/x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch [new file with mode: 0644]
queue-4.19/x86-cpu-add-table-argument-to-cpu_matches.patch [new file with mode: 0644]
queue-4.19/x86-speculation-add-ivy-bridge-to-affected-list.patch [new file with mode: 0644]
queue-4.19/x86-speculation-add-special-register-buffer-data-sampling-srbds-mitigation.patch [new file with mode: 0644]
queue-4.19/x86-speculation-add-srbds-vulnerability-and-mitigation-documentation.patch [new file with mode: 0644]

index 485a51c3d966d571e47169d7dcb21d88d1b37495..47c8a74bce9ef37db68dc75eeb8a9fe3b5cf04c0 100644 (file)
@@ -16,3 +16,8 @@ tty-hvc_console-fix-crashes-on-parallel-open-close.patch
 staging-rtl8712-fix-ieee80211_addba_param_buf_size_mask.patch
 cdc-acm-heed-quirk-also-in-error-handling.patch
 nvmem-qfprom-remove-incorrect-write-support.patch
+x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch
+x86-cpu-add-table-argument-to-cpu_matches.patch
+x86-speculation-add-special-register-buffer-data-sampling-srbds-mitigation.patch
+x86-speculation-add-srbds-vulnerability-and-mitigation-documentation.patch
+x86-speculation-add-ivy-bridge-to-affected-list.patch
diff --git a/queue-4.19/x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch b/queue-4.19/x86-cpu-add-a-steppings-field-to-struct-x86_cpu_id.patch
new file mode 100644 (file)
index 0000000..6bf5cb5
--- /dev/null
@@ -0,0 +1,118 @@
+From foo@baz Tue 09 Jun 2020 07:03:49 PM CEST
+From: Mark Gross <mgross@linux.intel.com>
+Date: Thu, 16 Apr 2020 17:23:10 +0200
+Subject: x86/cpu: Add a steppings field to struct x86_cpu_id
+
+From: Mark Gross <mgross@linux.intel.com>
+
+commit e9d7144597b10ff13ff2264c059f7d4a7fbc89ac upstream
+
+Intel uses the same family/model for several CPUs. Sometimes the
+stepping must be checked to tell them apart.
+
+On x86 there can be at most 16 steppings. Add a steppings bitmask to
+x86_cpu_id and a X86_MATCH_VENDOR_FAMILY_MODEL_STEPPING_FEATURE macro
+and support for matching against family/model/stepping.
+
+ [ bp: Massage.
+   tglx: Lightweight variant for backporting ]
+
+Signed-off-by: Mark Gross <mgross@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpu_device_id.h |   27 +++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/match.c          |    7 ++++++-
+ include/linux/mod_devicetable.h      |    6 ++++++
+ 3 files changed, 39 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpu_device_id.h
++++ b/arch/x86/include/asm/cpu_device_id.h
+@@ -9,6 +9,33 @@
+ #include <linux/mod_devicetable.h>
++#define X86_STEPPINGS(mins, maxs)    GENMASK(maxs, mins)
++
++/**
++ * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
++ * @_vendor:  The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
++ *            The name is expanded to X86_VENDOR_@_vendor
++ * @_family:  The family number or X86_FAMILY_ANY
++ * @_model:   The model number, model constant or X86_MODEL_ANY
++ * @_steppings:       Bitmask for steppings, stepping constant or X86_STEPPING_ANY
++ * @_feature: A X86_FEATURE bit or X86_FEATURE_ANY
++ * @_data:    Driver specific data or NULL. The internal storage
++ *            format is unsigned long. The supplied value, pointer
++ *            etc. is casted to unsigned long internally.
++ *
++ * Backport version to keep the SRBDS pile consistent. No shorter variants
++ * required for this.
++ */
++#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
++                                                  _steppings, _feature, _data) { \
++      .vendor         = X86_VENDOR_##_vendor,                         \
++      .family         = _family,                                      \
++      .model          = _model,                                       \
++      .steppings      = _steppings,                                   \
++      .feature        = _feature,                                     \
++      .driver_data    = (unsigned long) _data                         \
++}
++
+ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
+ #endif
+--- a/arch/x86/kernel/cpu/match.c
++++ b/arch/x86/kernel/cpu/match.c
+@@ -34,13 +34,18 @@ const struct x86_cpu_id *x86_match_cpu(c
+       const struct x86_cpu_id *m;
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+-      for (m = match; m->vendor | m->family | m->model | m->feature; m++) {
++      for (m = match;
++           m->vendor | m->family | m->model | m->steppings | m->feature;
++           m++) {
+               if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
+                       continue;
+               if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
+                       continue;
+               if (m->model != X86_MODEL_ANY && c->x86_model != m->model)
+                       continue;
++              if (m->steppings != X86_STEPPING_ANY &&
++                  !(BIT(c->x86_stepping) & m->steppings))
++                      continue;
+               if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
+                       continue;
+               return m;
+--- a/include/linux/mod_devicetable.h
++++ b/include/linux/mod_devicetable.h
+@@ -621,6 +621,10 @@ struct mips_cdmm_device_id {
+ /*
+  * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
+  * Although gcc seems to ignore this error, clang fails without this define.
++ *
++ * Note: The ordering of the struct is different from upstream because the
++ * static initializers in kernels < 5.7 still use C89 style while upstream
++ * has been converted to proper C99 initializers.
+  */
+ #define x86cpu_device_id x86_cpu_id
+ struct x86_cpu_id {
+@@ -629,6 +633,7 @@ struct x86_cpu_id {
+       __u16 model;
+       __u16 feature;  /* bit index */
+       kernel_ulong_t driver_data;
++      __u16 steppings;
+ };
+ #define X86_FEATURE_MATCH(x) \
+@@ -637,6 +642,7 @@ struct x86_cpu_id {
+ #define X86_VENDOR_ANY 0xffff
+ #define X86_FAMILY_ANY 0
+ #define X86_MODEL_ANY  0
++#define X86_STEPPING_ANY 0
+ #define X86_FEATURE_ANY 0     /* Same as FPU, you can't test for that */
+ /*
diff --git a/queue-4.19/x86-cpu-add-table-argument-to-cpu_matches.patch b/queue-4.19/x86-cpu-add-table-argument-to-cpu_matches.patch
new file mode 100644 (file)
index 0000000..e74d3e9
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Tue 09 Jun 2020 07:03:49 PM CEST
+From: Mark Gross <mgross@linux.intel.com>
+Date: Thu, 16 Apr 2020 17:32:42 +0200
+Subject: x86/cpu: Add 'table' argument to cpu_matches()
+
+From: Mark Gross <mgross@linux.intel.com>
+
+commit 93920f61c2ad7edb01e63323832585796af75fc9 upstream
+
+To make cpu_matches() reusable for other matching tables, have it take a
+pointer to a x86_cpu_id table as an argument.
+
+ [ bp: Flip arguments order. ]
+
+Signed-off-by: Mark Gross <mgross@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c |   23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1013,9 +1013,9 @@ static const __initconst struct x86_cpu_
+       {}
+ };
+-static bool __init cpu_matches(unsigned long which)
++static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
+ {
+-      const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
++      const struct x86_cpu_id *m = x86_match_cpu(table);
+       return m && !!(m->driver_data & which);
+ }
+@@ -1035,29 +1035,32 @@ static void __init cpu_set_bug_bits(stru
+       u64 ia32_cap = x86_read_arch_cap_msr();
+       /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+-      if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
++      if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
++          !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+               setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+-      if (cpu_matches(NO_SPECULATION))
++      if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
+               return;
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+       setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+-      if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
++      if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
++          !(ia32_cap & ARCH_CAP_SSB_NO) &&
+          !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
+               setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+       if (ia32_cap & ARCH_CAP_IBRS_ALL)
+               setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+-      if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
++      if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
++          !(ia32_cap & ARCH_CAP_MDS_NO)) {
+               setup_force_cpu_bug(X86_BUG_MDS);
+-              if (cpu_matches(MSBDS_ONLY))
++              if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
+                       setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+       }
+-      if (!cpu_matches(NO_SWAPGS))
++      if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
+               setup_force_cpu_bug(X86_BUG_SWAPGS);
+       /*
+@@ -1075,7 +1078,7 @@ static void __init cpu_set_bug_bits(stru
+            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+               setup_force_cpu_bug(X86_BUG_TAA);
+-      if (cpu_matches(NO_MELTDOWN))
++      if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
+       /* Rogue Data Cache Load? No! */
+@@ -1084,7 +1087,7 @@ static void __init cpu_set_bug_bits(stru
+       setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+-      if (cpu_matches(NO_L1TF))
++      if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
+               return;
+       setup_force_cpu_bug(X86_BUG_L1TF);
diff --git a/queue-4.19/x86-speculation-add-ivy-bridge-to-affected-list.patch b/queue-4.19/x86-speculation-add-ivy-bridge-to-affected-list.patch
new file mode 100644 (file)
index 0000000..ab87b88
--- /dev/null
@@ -0,0 +1,41 @@
+From foo@baz Tue 09 Jun 2020 07:03:49 PM CEST
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Date: Mon, 27 Apr 2020 20:46:13 +0200
+Subject: x86/speculation: Add Ivy Bridge to affected list
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+commit 3798cc4d106e91382bfe016caa2edada27c2bb3f upstream
+
+Make the docs match the code.
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
++++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
+@@ -27,6 +27,8 @@ by software using TSX_CTRL_MSR otherwise
+   =============  ============  ========
+   common name    Family_Model  Stepping
+   =============  ============  ========
++  IvyBridge      06_3AH        All
++
+   Haswell        06_3CH        All
+   Haswell_L      06_45H        All
+   Haswell_G      06_46H        All
+@@ -37,9 +39,8 @@ by software using TSX_CTRL_MSR otherwise
+   Skylake_L      06_4EH        All
+   Skylake        06_5EH        All
+-  Kabylake_L     06_8EH        <=0xC
+-
+-  Kabylake       06_9EH        <=0xD
++  Kabylake_L     06_8EH        <= 0xC
++  Kabylake       06_9EH        <= 0xD
+   =============  ============  ========
+ Related CVEs
diff --git a/queue-4.19/x86-speculation-add-special-register-buffer-data-sampling-srbds-mitigation.patch b/queue-4.19/x86-speculation-add-special-register-buffer-data-sampling-srbds-mitigation.patch
new file mode 100644 (file)
index 0000000..b1c4b4e
--- /dev/null
@@ -0,0 +1,370 @@
+From foo@baz Tue 09 Jun 2020 07:03:49 PM CEST
+From: Mark Gross <mgross@linux.intel.com>
+Date: Thu, 16 Apr 2020 17:54:04 +0200
+Subject: x86/speculation: Add Special Register Buffer Data Sampling (SRBDS) mitigation
+
+From: Mark Gross <mgross@linux.intel.com>
+
+commit 7e5b3c267d256822407a22fdce6afdf9cd13f9fb upstream
+
+SRBDS is an MDS-like speculative side channel that can leak bits from the
+random number generator (RNG) across cores and threads. New microcode
+serializes the processor access during the execution of RDRAND and
+RDSEED. This ensures that the shared buffer is overwritten before it is
+released for reuse.
+
+While it is present on all affected CPU models, the microcode mitigation
+is not needed on models that enumerate ARCH_CAPABILITIES[MDS_NO] in the
+cases where TSX is not supported or has been disabled with TSX_CTRL.
+
+The mitigation is activated by default on affected processors and it
+increases latency for RDRAND and RDSEED instructions. Among other
+effects this will reduce throughput from /dev/urandom.
+
+* Enable administrator to configure the mitigation off when desired using
+  either mitigations=off or srbds=off.
+
+* Export vulnerability status via sysfs
+
+* Rename file-scoped macros to apply for non-whitelist table initializations.
+
+ [ bp: Massage,
+   - s/VULNBL_INTEL_STEPPING/VULNBL_INTEL_STEPPINGS/g,
+   - do not read arch cap MSR a second time in tsx_fused_off() - just pass it in,
+   - flip check in cpu_set_bug_bits() to save an indentation level,
+   - reflow comments.
+   jpoimboe: s/Mitigated/Mitigation/ in user-visible strings
+   tglx: Dropped the fused off magic for now
+ ]
+
+Signed-off-by: Mark Gross <mgross@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Tested-by: Neelima Krishnan <neelima.krishnan@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu |    1 
+ Documentation/admin-guide/kernel-parameters.txt    |   20 +++
+ arch/x86/include/asm/cpufeatures.h                 |    2 
+ arch/x86/include/asm/msr-index.h                   |    4 
+ arch/x86/kernel/cpu/bugs.c                         |  106 +++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c                       |   31 ++++++
+ arch/x86/kernel/cpu/cpu.h                          |    1 
+ drivers/base/cpu.c                                 |    8 +
+ 8 files changed, 173 insertions(+)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -478,6 +478,7 @@ What:              /sys/devices/system/cpu/vulnerabi
+               /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
+               /sys/devices/system/cpu/vulnerabilities/l1tf
+               /sys/devices/system/cpu/vulnerabilities/mds
++              /sys/devices/system/cpu/vulnerabilities/srbds
+               /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+               /sys/devices/system/cpu/vulnerabilities/itlb_multihit
+ Date:         January 2018
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -4415,6 +4415,26 @@
+       spia_pedr=
+       spia_peddr=
++      srbds=          [X86,INTEL]
++                      Control the Special Register Buffer Data Sampling
++                      (SRBDS) mitigation.
++
++                      Certain CPUs are vulnerable to an MDS-like
++                      exploit which can leak bits from the random
++                      number generator.
++
++                      By default, this issue is mitigated by
++                      microcode.  However, the microcode fix can cause
++                      the RDRAND and RDSEED instructions to become
++                      much slower.  Among other effects, this will
++                      result in reduced throughput from /dev/urandom.
++
++                      The microcode mitigation can be disabled with
++                      the following option:
++
++                      off:    Disable mitigation and remove
++                              performance impact to RDRAND and RDSEED
++
+       srcutree.counter_wrap_check [KNL]
+                       Specifies how frequently to check for
+                       grace-period sequence counter wrap for the
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -347,6 +347,7 @@
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+ #define X86_FEATURE_AVX512_4VNNIW     (18*32+ 2) /* AVX-512 Neural Network Instructions */
+ #define X86_FEATURE_AVX512_4FMAPS     (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
++#define X86_FEATURE_SRBDS_CTRL                (18*32+ 9) /* "" SRBDS mitigation MSR available */
+ #define X86_FEATURE_TSX_FORCE_ABORT   (18*32+13) /* "" TSX_FORCE_ABORT */
+ #define X86_FEATURE_MD_CLEAR          (18*32+10) /* VERW clears CPU buffers */
+ #define X86_FEATURE_PCONFIG           (18*32+18) /* Intel PCONFIG */
+@@ -391,5 +392,6 @@
+ #define X86_BUG_SWAPGS                        X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+ #define X86_BUG_TAA                   X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+ #define X86_BUG_ITLB_MULTIHIT         X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
++#define X86_BUG_SRBDS                 X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -110,6 +110,10 @@
+ #define TSX_CTRL_RTM_DISABLE          BIT(0)  /* Disable RTM feature */
+ #define TSX_CTRL_CPUID_CLEAR          BIT(1)  /* Disable TSX enumeration */
++/* SRBDS support */
++#define MSR_IA32_MCU_OPT_CTRL         0x00000123
++#define RNGDS_MITG_DIS                        BIT(0)
++
+ #define MSR_IA32_SYSENTER_CS          0x00000174
+ #define MSR_IA32_SYSENTER_ESP         0x00000175
+ #define MSR_IA32_SYSENTER_EIP         0x00000176
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -41,6 +41,7 @@ static void __init l1tf_select_mitigatio
+ static void __init mds_select_mitigation(void);
+ static void __init mds_print_mitigation(void);
+ static void __init taa_select_mitigation(void);
++static void __init srbds_select_mitigation(void);
+ /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+ u64 x86_spec_ctrl_base;
+@@ -108,6 +109,7 @@ void __init check_bugs(void)
+       l1tf_select_mitigation();
+       mds_select_mitigation();
+       taa_select_mitigation();
++      srbds_select_mitigation();
+       /*
+        * As MDS and TAA mitigations are inter-related, print MDS
+@@ -391,6 +393,97 @@ static int __init tsx_async_abort_parse_
+ early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
+ #undef pr_fmt
++#define pr_fmt(fmt)   "SRBDS: " fmt
++
++enum srbds_mitigations {
++      SRBDS_MITIGATION_OFF,
++      SRBDS_MITIGATION_UCODE_NEEDED,
++      SRBDS_MITIGATION_FULL,
++      SRBDS_MITIGATION_TSX_OFF,
++      SRBDS_MITIGATION_HYPERVISOR,
++};
++
++static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
++
++static const char * const srbds_strings[] = {
++      [SRBDS_MITIGATION_OFF]          = "Vulnerable",
++      [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
++      [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
++      [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
++      [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
++};
++
++static bool srbds_off;
++
++void update_srbds_msr(void)
++{
++      u64 mcu_ctrl;
++
++      if (!boot_cpu_has_bug(X86_BUG_SRBDS))
++              return;
++
++      if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
++              return;
++
++      if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
++              return;
++
++      rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++
++      switch (srbds_mitigation) {
++      case SRBDS_MITIGATION_OFF:
++      case SRBDS_MITIGATION_TSX_OFF:
++              mcu_ctrl |= RNGDS_MITG_DIS;
++              break;
++      case SRBDS_MITIGATION_FULL:
++              mcu_ctrl &= ~RNGDS_MITG_DIS;
++              break;
++      default:
++              break;
++      }
++
++      wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
++}
++
++static void __init srbds_select_mitigation(void)
++{
++      u64 ia32_cap;
++
++      if (!boot_cpu_has_bug(X86_BUG_SRBDS))
++              return;
++
++      /*
++       * Check to see if this is one of the MDS_NO systems supporting
++       * TSX that are only exposed to SRBDS when TSX is enabled.
++       */
++      ia32_cap = x86_read_arch_cap_msr();
++      if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM))
++              srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
++      else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
++              srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
++      else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
++              srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
++      else if (cpu_mitigations_off() || srbds_off)
++              srbds_mitigation = SRBDS_MITIGATION_OFF;
++
++      update_srbds_msr();
++      pr_info("%s\n", srbds_strings[srbds_mitigation]);
++}
++
++static int __init srbds_parse_cmdline(char *str)
++{
++      if (!str)
++              return -EINVAL;
++
++      if (!boot_cpu_has_bug(X86_BUG_SRBDS))
++              return 0;
++
++      srbds_off = !strcmp(str, "off");
++      return 0;
++}
++early_param("srbds", srbds_parse_cmdline);
++
++#undef pr_fmt
+ #define pr_fmt(fmt)     "Spectre V1 : " fmt
+ enum spectre_v1_mitigation {
+@@ -1491,6 +1584,11 @@ static char *ibpb_state(void)
+       return "";
+ }
++static ssize_t srbds_show_state(char *buf)
++{
++      return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
+ {
+@@ -1535,6 +1633,9 @@ static ssize_t cpu_show_common(struct de
+       case X86_BUG_ITLB_MULTIHIT:
+               return itlb_multihit_show_state(buf);
++      case X86_BUG_SRBDS:
++              return srbds_show_state(buf);
++
+       default:
+               break;
+       }
+@@ -1581,4 +1682,9 @@ ssize_t cpu_show_itlb_multihit(struct de
+ {
+       return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
+ }
++
++ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
++{
++      return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1013,6 +1013,27 @@ static const __initconst struct x86_cpu_
+       {}
+ };
++#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)                 \
++      X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,             \
++                                          INTEL_FAM6_##model, steppings, \
++                                          X86_FEATURE_ANY, issues)
++
++#define SRBDS         BIT(0)
++
++static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
++      VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(HASWELL_CORE,    X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(HASWELL_ULT,     X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(HASWELL_GT3E,    X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(BROADWELL_GT3E,  X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(BROADWELL_CORE,  X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(SKYLAKE_MOBILE,  X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(SKYLAKE_DESKTOP, X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(KABYLAKE_MOBILE, X86_STEPPINGS(0x0, 0xC),        SRBDS),
++      VULNBL_INTEL_STEPPINGS(KABYLAKE_DESKTOP,X86_STEPPINGS(0x0, 0xD),        SRBDS),
++      {}
++};
++
+ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
+ {
+       const struct x86_cpu_id *m = x86_match_cpu(table);
+@@ -1078,6 +1099,15 @@ static void __init cpu_set_bug_bits(stru
+            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+               setup_force_cpu_bug(X86_BUG_TAA);
++      /*
++       * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
++       * in the vulnerability blacklist.
++       */
++      if ((cpu_has(c, X86_FEATURE_RDRAND) ||
++           cpu_has(c, X86_FEATURE_RDSEED)) &&
++          cpu_matches(cpu_vuln_blacklist, SRBDS))
++                  setup_force_cpu_bug(X86_BUG_SRBDS);
++
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
+@@ -1522,6 +1552,7 @@ void identify_secondary_cpu(struct cpuin
+       mtrr_ap_init();
+       validate_apic_and_package_id(c);
+       x86_spec_ctrl_setup_ap();
++      update_srbds_msr();
+ }
+ static __init int setup_noclflush(char *arg)
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -80,6 +80,7 @@ extern void detect_ht(struct cpuinfo_x86
+ unsigned int aperfmperf_get_khz(int cpu);
+ extern void x86_spec_ctrl_setup_ap(void);
++extern void update_srbds_msr(void);
+ extern u64 x86_read_arch_cap_msr(void);
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -565,6 +565,12 @@ ssize_t __weak cpu_show_itlb_multihit(st
+       return sprintf(buf, "Not affected\n");
+ }
++ssize_t __weak cpu_show_srbds(struct device *dev,
++                            struct device_attribute *attr, char *buf)
++{
++      return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+@@ -573,6 +579,7 @@ static DEVICE_ATTR(l1tf, 0444, cpu_show_
+ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
+ static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
+ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
++static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+       &dev_attr_meltdown.attr,
+@@ -583,6 +590,7 @@ static struct attribute *cpu_root_vulner
+       &dev_attr_mds.attr,
+       &dev_attr_tsx_async_abort.attr,
+       &dev_attr_itlb_multihit.attr,
++      &dev_attr_srbds.attr,
+       NULL
+ };
diff --git a/queue-4.19/x86-speculation-add-srbds-vulnerability-and-mitigation-documentation.patch b/queue-4.19/x86-speculation-add-srbds-vulnerability-and-mitigation-documentation.patch
new file mode 100644 (file)
index 0000000..c71ec33
--- /dev/null
@@ -0,0 +1,183 @@
+From foo@baz Tue 09 Jun 2020 07:03:49 PM CEST
+From: Mark Gross <mgross@linux.intel.com>
+Date: Thu, 16 Apr 2020 18:21:51 +0200
+Subject: x86/speculation: Add SRBDS vulnerability and mitigation documentation
+
+From: Mark Gross <mgross@linux.intel.com>
+
+commit 7222a1b5b87417f22265c92deea76a6aecd0fb0f upstream
+
+Add documentation for the SRBDS vulnerability and its mitigation.
+
+ [ bp: Massage.
+   jpoimboe: sysfs table strings. ]
+
+Signed-off-by: Mark Gross <mgross@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Tony Luck <tony.luck@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/index.rst                                 |    1 
+ Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst |  148 ++++++++++
+ 2 files changed, 149 insertions(+)
+ create mode 100644 Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
+
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -14,3 +14,4 @@ are configurable at compile, boot or run
+    mds
+    tsx_async_abort
+    multihit.rst
++   special-register-buffer-data-sampling.rst
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst
+@@ -0,0 +1,148 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++SRBDS - Special Register Buffer Data Sampling
++=============================================
++
++SRBDS is a hardware vulnerability that allows MDS :doc:`mds` techniques to
++infer values returned from special register accesses.  Special register
++accesses are accesses to off core registers.  According to Intel's evaluation,
++the special register reads that have a security expectation of privacy are
++RDRAND, RDSEED and SGX EGETKEY.
++
++When RDRAND, RDSEED and EGETKEY instructions are used, the data is moved
++to the core through the special register mechanism that is susceptible
++to MDS attacks.
++
++Affected processors
++--------------------
++Core models (desktop, mobile, Xeon-E3) that implement RDRAND and/or RDSEED may
++be affected.
++
++A processor is affected by SRBDS if its Family_Model and stepping is
++in the following list, with the exception of the listed processors
++exporting MDS_NO while Intel TSX is available yet not enabled. The
++latter class of processors are only affected when Intel TSX is enabled
++by software using TSX_CTRL_MSR otherwise they are not affected.
++
++  =============  ============  ========
++  common name    Family_Model  Stepping
++  =============  ============  ========
++  Haswell        06_3CH        All
++  Haswell_L      06_45H        All
++  Haswell_G      06_46H        All
++
++  Broadwell_G    06_47H        All
++  Broadwell      06_3DH        All
++
++  Skylake_L      06_4EH        All
++  Skylake        06_5EH        All
++
++  Kabylake_L     06_8EH        <=0xC
++
++  Kabylake       06_9EH        <=0xD
++  =============  ============  ========
++
++Related CVEs
++------------
++
++The following CVE entry is related to this SRBDS issue:
++
++    ==============  =====  =====================================
++    CVE-2020-0543   SRBDS  Special Register Buffer Data Sampling
++    ==============  =====  =====================================
++
++Attack scenarios
++----------------
++An unprivileged user can extract values returned from RDRAND and RDSEED
++executed on another core or sibling thread using MDS techniques.
++
++
++Mitigation mechanism
++--------------------
++Intel will release microcode updates that modify the RDRAND, RDSEED, and
++EGETKEY instructions to overwrite secret special register data in the shared
++staging buffer before the secret data can be accessed by another logical
++processor.
++
++During execution of the RDRAND, RDSEED, or EGETKEY instructions, off-core
++accesses from other logical processors will be delayed until the special
++register read is complete and the secret data in the shared staging buffer is
++overwritten.
++
++This has three effects on performance:
++
++#. RDRAND, RDSEED, or EGETKEY instructions have higher latency.
++
++#. Executing RDRAND at the same time on multiple logical processors will be
++   serialized, resulting in an overall reduction in the maximum RDRAND
++   bandwidth.
++
++#. Executing RDRAND, RDSEED or EGETKEY will delay memory accesses from other
++   logical processors that miss their core caches, with an impact similar to
++   legacy locked cache-line-split accesses.
++
++The microcode updates provide an opt-out mechanism (RNGDS_MITG_DIS) to disable
++the mitigation for RDRAND and RDSEED instructions executed outside of Intel
++Software Guard Extensions (Intel SGX) enclaves. On logical processors that
++disable the mitigation using this opt-out mechanism, RDRAND and RDSEED do not
++take longer to execute and do not impact performance of sibling logical
++processors memory accesses. The opt-out mechanism does not affect Intel SGX
++enclaves (including execution of RDRAND or RDSEED inside an enclave, as well
++as EGETKEY execution).
++
++IA32_MCU_OPT_CTRL MSR Definition
++--------------------------------
++Along with the mitigation for this issue, Intel added a new thread-scope
++IA32_MCU_OPT_CTRL MSR, (address 0x123). The presence of this MSR and
++RNGDS_MITG_DIS (bit 0) is enumerated by CPUID.(EAX=07H,ECX=0).EDX[SRBDS_CTRL =
++9]==1. This MSR is introduced through the microcode update.
++
++Setting IA32_MCU_OPT_CTRL[0] (RNGDS_MITG_DIS) to 1 for a logical processor
++disables the mitigation for RDRAND and RDSEED executed outside of an Intel SGX
++enclave on that logical processor. Opting out of the mitigation for a
++particular logical processor does not affect the RDRAND and RDSEED mitigations
++for other logical processors.
++
++Note that inside of an Intel SGX enclave, the mitigation is applied regardless
++of the value of RNGDS_MITG_DIS.
++
++Mitigation control on the kernel command line
++---------------------------------------------
++The kernel command line allows control over the SRBDS mitigation at boot time
++with the option "srbds=".  The option for this is:
++
++  ============= =============================================================
++  off           This option disables SRBDS mitigation for RDRAND and RDSEED on
++                affected platforms.
++  ============= =============================================================
++
++SRBDS System Information
++------------------------
++The Linux kernel provides vulnerability status information through sysfs.  For
++SRBDS this can be accessed by the following sysfs file:
++/sys/devices/system/cpu/vulnerabilities/srbds
++
++The possible values contained in this file are:
++
++ ============================== =============================================
++ Not affected                   Processor not vulnerable
++ Vulnerable                     Processor vulnerable and mitigation disabled
++ Vulnerable: No microcode       Processor vulnerable and microcode is missing
++                                mitigation
++ Mitigation: Microcode          Processor is vulnerable and mitigation is in
++                                effect.
++ Mitigation: TSX disabled       Processor is only vulnerable when TSX is
++                                enabled while this system was booted with TSX
++                                disabled.
++ Unknown: Dependent on
++ hypervisor status              Running on virtual guest processor that is
++                                affected but with no way to know if host
++                                processor is mitigated or vulnerable.
++ ============================== =============================================
++
++SRBDS Default mitigation
++------------------------
++This new microcode serializes processor access during execution of RDRAND and
++RDSEED, and ensures that the shared buffer is overwritten before it is released for
++reuse.  Use the "srbds=off" kernel command line to disable the mitigation for
++RDRAND and RDSEED.