From: Greg Kroah-Hartman Date: Tue, 14 May 2019 17:06:15 +0000 (+0200) Subject: 5.0-stable patches X-Git-Tag: v5.1.2~2 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b07c49f810ed63a3ba8e1750af3f211728622348;p=thirdparty%2Fkernel%2Fstable-queue.git 5.0-stable patches added patches: 0001-x86-msr-index-Cleanup-bit-defines.patch 0002-x86-speculation-Consolidate-CPU-whitelists.patch 0003-x86-speculation-mds-Add-basic-bug-infrastructure-for.patch 0004-x86-speculation-mds-Add-BUG_MSBDS_ONLY.patch 0005-x86-kvm-Expose-X86_FEATURE_MD_CLEAR-to-guests.patch 0006-x86-speculation-mds-Add-mds_clear_cpu_buffers.patch 0007-x86-speculation-mds-Clear-CPU-buffers-on-exit-to-use.patch 0008-x86-kvm-vmx-Add-MDS-protection-when-L1D-Flush-is-not.patch 0009-x86-speculation-mds-Conditionally-clear-CPU-buffers-.patch 0010-x86-speculation-mds-Add-mitigation-control-for-MDS.patch 0011-x86-speculation-mds-Add-sysfs-reporting-for-MDS.patch 0012-x86-speculation-mds-Add-mitigation-mode-VMWERV.patch 0013-Documentation-Move-L1TF-to-separate-directory.patch 0014-Documentation-Add-MDS-vulnerability-documentation.patch 0015-x86-speculation-mds-Add-mds-full-nosmt-cmdline-optio.patch 0016-x86-speculation-Move-arch_smt_update-call-to-after-m.patch 0017-x86-speculation-mds-Add-SMT-warning-message.patch 0018-x86-speculation-mds-Fix-comment.patch 0019-x86-speculation-mds-Print-SMT-vulnerable-on-MSBDS-wi.patch 0020-cpu-speculation-Add-mitigations-cmdline-option.patch 0021-x86-speculation-Support-mitigations-cmdline-option.patch 0022-powerpc-speculation-Support-mitigations-cmdline-opti.patch 0023-s390-speculation-Support-mitigations-cmdline-option.patch 0024-x86-speculation-mds-Add-mitigations-support-for-MDS.patch 0025-x86-mds-Add-MDSUM-variant-to-the-MDS-documentation.patch 0026-Documentation-Correct-the-possible-MDS-sysfs-values.patch 0027-x86-speculation-mds-Fix-documentation-typo.patch --- diff --git a/queue-5.0/0001-x86-msr-index-Cleanup-bit-defines.patch b/queue-5.0/0001-x86-msr-index-Cleanup-bit-defines.patch new file mode 100644 index 00000000000..74420d114aa --- /dev/null +++ b/queue-5.0/0001-x86-msr-index-Cleanup-bit-defines.patch @@ -0,0 +1,123 @@ +From adf43fbf52857d5a364a6d2b804ada52bb8f0723 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Thu, 21 Feb 2019 12:36:50 +0100 +Subject: [PATCH 01/27] x86/msr-index: Cleanup bit defines + +commit d8eabc37310a92df40d07c5a8afc53cebf996716 upstream + +Greg pointed out that speculation related bit defines are using (1 << N) +format instead of BIT(N). Aside of that (1 << N) is wrong as it should use +1UL at least. + +Clean it up. + +[ Josh Poimboeuf: Fix tools build ] + +Reported-by: Greg Kroah-Hartman +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/msr-index.h | 34 ++++++++++--------- + tools/power/x86/turbostat/Makefile | 2 +- + .../power/x86/x86_energy_perf_policy/Makefile | 2 +- + 3 files changed, 20 insertions(+), 18 deletions(-) + +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index ca5bc0eacb95..4f1e8b28daa0 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ b/arch/x86/include/asm/msr-index.h +@@ -2,6 +2,8 @@ + #ifndef _ASM_X86_MSR_INDEX_H + #define _ASM_X86_MSR_INDEX_H + ++#include ++ + /* + * CPU model specific register (MSR) numbers. + * +@@ -40,14 +42,14 @@ + /* Intel MSRs. 
Some also available on other CPUs */ + + #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ +-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ ++#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */ + #define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */ +-#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ ++#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ + #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ +-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ ++#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ + + #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ +-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ ++#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ + + #define MSR_PPIN_CTL 0x0000004e + #define MSR_PPIN 0x0000004f +@@ -69,20 +71,20 @@ + #define MSR_MTRRcap 0x000000fe + + #define MSR_IA32_ARCH_CAPABILITIES 0x0000010a +-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */ +-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */ +-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */ +-#define ARCH_CAP_SSB_NO (1 << 4) /* +- * Not susceptible to Speculative Store Bypass +- * attack, so no Speculative Store Bypass +- * control required. +- */ ++#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */ ++#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */ ++#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */ ++#define ARCH_CAP_SSB_NO BIT(4) /* ++ * Not susceptible to Speculative Store Bypass ++ * attack, so no Speculative Store Bypass ++ * control required. ++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b +-#define L1D_FLUSH (1 << 0) /* +- * Writeback and invalidate the +- * L1 data cache. +- */ ++#define L1D_FLUSH BIT(0) /* ++ * Writeback and invalidate the ++ * L1 data cache. 
++ */ + + #define MSR_IA32_BBL_CR_CTL 0x00000119 + #define MSR_IA32_BBL_CR_CTL3 0x0000011e +diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile +index 1598b4fa0b11..045f5f7d68ab 100644 +--- a/tools/power/x86/turbostat/Makefile ++++ b/tools/power/x86/turbostat/Makefile +@@ -9,7 +9,7 @@ ifeq ("$(origin O)", "command line") + endif + + turbostat : turbostat.c +-override CFLAGS += -Wall ++override CFLAGS += -Wall -I../../../include + override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' + override CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"' + +diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile +index ae7a0e09b722..1fdeef864e7c 100644 +--- a/tools/power/x86/x86_energy_perf_policy/Makefile ++++ b/tools/power/x86/x86_energy_perf_policy/Makefile +@@ -9,7 +9,7 @@ ifeq ("$(origin O)", "command line") + endif + + x86_energy_perf_policy : x86_energy_perf_policy.c +-override CFLAGS += -Wall ++override CFLAGS += -Wall -I../../../include + override CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"' + + %: %.c +-- +2.21.0 + diff --git a/queue-5.0/0002-x86-speculation-Consolidate-CPU-whitelists.patch b/queue-5.0/0002-x86-speculation-Consolidate-CPU-whitelists.patch new file mode 100644 index 00000000000..c8351724c16 --- /dev/null +++ b/queue-5.0/0002-x86-speculation-Consolidate-CPU-whitelists.patch @@ -0,0 +1,178 @@ +From ea14078054cff48062700c792f30ee7e493c566c Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Wed, 27 Feb 2019 10:10:23 +0100 +Subject: [PATCH 02/27] x86/speculation: Consolidate CPU whitelists + +commit 36ad35131adacc29b328b9c8b6277a8bf0d6fd5d upstream + +The CPU vulnerability whitelists have some overlap and there are more +whitelists coming along. + +Use the driver_data field in the x86_cpu_id struct to denote the +whitelisted vulnerabilities and combine all whitelists into one. 
+ +Suggested-by: Linus Torvalds +Signed-off-by: Thomas Gleixner +Reviewed-by: Frederic Weisbecker +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/common.c | 110 +++++++++++++++++++---------------- + 1 file changed, 60 insertions(+), 50 deletions(-) + +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index cb28e98a0659..26ec15034f86 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -948,61 +948,72 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + #endif + } + +-static const __initconst struct x86_cpu_id cpu_no_speculation[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY }, +- { X86_VENDOR_CENTAUR, 5 }, +- { X86_VENDOR_INTEL, 5 }, +- { X86_VENDOR_NSC, 5 }, +- { X86_VENDOR_ANY, 4 }, ++#define NO_SPECULATION BIT(0) ++#define NO_MELTDOWN BIT(1) ++#define NO_SSB BIT(2) ++#define NO_L1TF BIT(3) ++ ++#define VULNWL(_vendor, _family, _model, _whitelist) \ ++ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } ++ ++#define VULNWL_INTEL(model, whitelist) \ ++ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist) ++ ++#define VULNWL_AMD(family, whitelist) \ ++ VULNWL(AMD, family, X86_MODEL_ANY, whitelist) ++ ++#define VULNWL_HYGON(family, whitelist) \ ++ VULNWL(HYGON, family, X86_MODEL_ANY, whitelist) ++ ++static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { ++ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION), ++ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION), ++ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), ++ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), ++ ++ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION), ++ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), ++ ++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF), ++ ++ VULNWL_INTEL(CORE_YONAH, NO_SSB), ++ ++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF), ++ ++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ ++ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ ++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF), ++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF), + {} + }; + +-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { +- { X86_VENDOR_AMD }, +- { X86_VENDOR_HYGON }, +- {} +-}; +- +-/* Only list CPUs which speculate but are non susceptible to SSB */ +-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT 
}, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, +- { X86_VENDOR_AMD, 0x12, }, +- { X86_VENDOR_AMD, 0x11, }, +- { X86_VENDOR_AMD, 0x10, }, +- { X86_VENDOR_AMD, 0xf, }, +- {} +-}; ++static bool __init cpu_matches(unsigned long which) ++{ ++ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist); + +-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { +- /* in addition to cpu_no_speculation */ +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, +- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, +- {} +-}; ++ return m && !!(m->driver_data & which); ++} + + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + { + u64 ia32_cap = 0; + +- if (x86_match_cpu(cpu_no_speculation)) ++ if (cpu_matches(NO_SPECULATION)) + return; + + setup_force_cpu_bug(X86_BUG_SPECTRE_V1); +@@ -1011,15 +1022,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) + rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); + +- if (!x86_match_cpu(cpu_no_spec_store_bypass) && +- !(ia32_cap & ARCH_CAP_SSB_NO) && ++ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) && + !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) + setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); + + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + +- if (x86_match_cpu(cpu_no_meltdown)) ++ if (cpu_matches(NO_MELTDOWN)) + return; + + /* Rogue Data Cache Load? No! */ +@@ -1028,7 +1038,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + +- if (x86_match_cpu(cpu_no_l1tf)) ++ if (cpu_matches(NO_L1TF)) + return; + + setup_force_cpu_bug(X86_BUG_L1TF); +-- +2.21.0 + diff --git a/queue-5.0/0003-x86-speculation-mds-Add-basic-bug-infrastructure-for.patch b/queue-5.0/0003-x86-speculation-mds-Add-basic-bug-infrastructure-for.patch new file mode 100644 index 00000000000..e70ff753a42 --- /dev/null +++ b/queue-5.0/0003-x86-speculation-mds-Add-basic-bug-infrastructure-for.patch @@ -0,0 +1,160 @@ +From 85e507546d6f6cc4fd77dbcbffc67cecc7d8428f Mon Sep 17 00:00:00 2001 +From: Andi Kleen +Date: Fri, 18 Jan 2019 16:50:16 -0800 +Subject: [PATCH 03/27] x86/speculation/mds: Add basic bug infrastructure for + MDS + +commit ed5194c2732c8084af9fd159c146ea92bf137128 upstream + +Microarchitectural Data Sampling (MDS), is a class of side channel attacks +on internal buffers in Intel CPUs. The variants are: + + - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126) + - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130) + - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127) + +MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a +dependent load (store-to-load forwarding) as an optimization. 
The forward +can also happen to a faulting or assisting load operation for a different +memory address, which can be exploited under certain conditions. Store +buffers are partitioned between Hyper-Threads so cross thread forwarding is +not possible. But if a thread enters or exits a sleep state the store +buffer is repartitioned which can expose data from one thread to the other. + +MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage +L1 miss situations and to hold data which is returned or sent in response +to a memory or I/O operation. Fill buffers can forward data to a load +operation and also write data to the cache. When the fill buffer is +deallocated it can retain the stale data of the preceding operations which +can then be forwarded to a faulting or assisting load operation, which can +be exploited under certain conditions. Fill buffers are shared between +Hyper-Threads so cross thread leakage is possible. + +MLDPS leaks Load Port Data. Load ports are used to perform load operations +from memory or I/O. The received data is then forwarded to the register +file or a subsequent operation. In some implementations the Load Port can +contain stale data from a previous operation which can be forwarded to +faulting or assisting loads under certain conditions, which again can be +exploited eventually. Load ports are shared between Hyper-Threads so cross +thread leakage is possible. + +All variants have the same mitigation for single CPU thread case (SMT off), +so the kernel can treat them as one MDS issue. + +Add the basic infrastructure to detect if the current CPU is affected by +MDS. + +[ tglx: Rewrote changelog ] + +Signed-off-by: Andi Kleen +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/cpufeatures.h | 2 ++ + arch/x86/include/asm/msr-index.h | 5 +++++ + arch/x86/kernel/cpu/common.c | 25 ++++++++++++++++--------- + 3 files changed, 23 insertions(+), 9 deletions(-) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 981ff9479648..71375c827f4f 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -344,6 +344,7 @@ + /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ + #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ + #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ ++#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ + #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ + #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ + #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ +@@ -382,5 +383,6 @@ + #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ + #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ ++#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h +index 4f1e8b28daa0..20f7da552e90 100644 +--- a/arch/x86/include/asm/msr-index.h ++++ 
b/arch/x86/include/asm/msr-index.h +@@ -79,6 +79,11 @@ + * attack, so no Speculative Store Bypass + * control required. + */ ++#define ARCH_CAP_MDS_NO BIT(5) /* ++ * Not susceptible to ++ * Microarchitectural Data ++ * Sampling (MDS) vulnerabilities. ++ */ + + #define MSR_IA32_FLUSH_CMD 0x0000010b + #define L1D_FLUSH BIT(0) /* +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 26ec15034f86..e34817bca504 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -952,6 +952,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + #define NO_MELTDOWN BIT(1) + #define NO_SSB BIT(2) + #define NO_L1TF BIT(3) ++#define NO_MDS BIT(4) + + #define VULNWL(_vendor, _family, _model, _whitelist) \ + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } +@@ -971,6 +972,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION), + VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION), + ++ /* Intel Family 6 */ + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION), + VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION), + VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION), +@@ -987,18 +989,20 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + VULNWL_INTEL(CORE_YONAH, NO_SSB), + + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF), +- VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF), +- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF), +- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF), + +- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF), +- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF), +- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF), +- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF), ++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF), ++ ++ /* AMD Family 0xf - 0x12 */ ++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), ++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), ++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), ++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS), + + /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ +- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF), +- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF), ++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS), ++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS), + {} + }; + +@@ -1029,6 +1033,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + ++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) ++ setup_force_cpu_bug(X86_BUG_MDS); ++ + if (cpu_matches(NO_MELTDOWN)) + return; + +-- +2.21.0 + diff --git a/queue-5.0/0004-x86-speculation-mds-Add-BUG_MSBDS_ONLY.patch b/queue-5.0/0004-x86-speculation-mds-Add-BUG_MSBDS_ONLY.patch new file mode 100644 index 00000000000..ab619150b91 --- /dev/null +++ b/queue-5.0/0004-x86-speculation-mds-Add-BUG_MSBDS_ONLY.patch @@ -0,0 +1,94 @@ +From b5569319aa1605a6ec6e10b0d4f0539dfb37ddc4 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Fri, 1 Mar 2019 20:21:08 +0100 +Subject: [PATCH 04/27] x86/speculation/mds: Add BUG_MSBDS_ONLY + +commit e261f209c3666e842fd645a1e31f001c3a26def9 upstream + +This bug bit is set on CPUs which are only affected by Microarchitectural +Store Buffer Data Sampling (MSBDS) and not by any other MDS variant. 
+ +This is important because the Store Buffers are partitioned between +Hyper-Threads so cross thread forwarding is not possible. But if a thread +enters or exits a sleep state the store buffer is repartitioned which can +expose data from one thread to the other. This transition can be mitigated. + +That means that for CPUs which are only affected by MSBDS SMT can be +enabled, if the CPU is not affected by other SMT sensitive vulnerabilities, +e.g. L1TF. The XEON PHI variants fall into that category. Also the +Silvermont/Airmont ATOMs, but for them it's not really relevant as they do +not support SMT, but mark them for completeness sake. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/cpufeatures.h | 1 + + arch/x86/kernel/cpu/common.c | 20 ++++++++++++-------- + 2 files changed, 13 insertions(+), 8 deletions(-) + +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h +index 71375c827f4f..75f27ee2c263 100644 +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -384,5 +384,6 @@ + #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ + #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ + #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ ++#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ + + #endif /* _ASM_X86_CPUFEATURES_H */ +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index e34817bca504..132a63dc5a76 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -953,6 +953,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) + #define NO_SSB BIT(2) + #define NO_L1TF BIT(3) + #define NO_MDS BIT(4) ++#define MSBDS_ONLY BIT(5) + + #define VULNWL(_vendor, _family, _model, _whitelist) \ + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist } +@@ -979,16 +980,16 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { + VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION), + VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION), + +- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF), +- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF), +- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF), +- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF), +- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF), +- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF), ++ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY), ++ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY), + + VULNWL_INTEL(CORE_YONAH, NO_SSB), + +- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF), ++ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY), + + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF), + VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF), +@@ -1033,8 +1034,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) + if (ia32_cap & ARCH_CAP_IBRS_ALL) + setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + +- if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) ++ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { + 
setup_force_cpu_bug(X86_BUG_MDS); ++ if (cpu_matches(MSBDS_ONLY)) ++ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY); ++ } + + if (cpu_matches(NO_MELTDOWN)) + return; +-- +2.21.0 + diff --git a/queue-5.0/0005-x86-kvm-Expose-X86_FEATURE_MD_CLEAR-to-guests.patch b/queue-5.0/0005-x86-kvm-Expose-X86_FEATURE_MD_CLEAR-to-guests.patch new file mode 100644 index 00000000000..268e378c0c9 --- /dev/null +++ b/queue-5.0/0005-x86-kvm-Expose-X86_FEATURE_MD_CLEAR-to-guests.patch @@ -0,0 +1,47 @@ +From 44e83212c81e9088be89b4c2219ba609c6639ad1 Mon Sep 17 00:00:00 2001 +From: Andi Kleen +Date: Fri, 18 Jan 2019 16:50:23 -0800 +Subject: [PATCH 05/27] x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests + +commit 6c4dbbd14730c43f4ed808a9c42ca41625925c22 upstream + +X86_FEATURE_MD_CLEAR is a new CPUID bit which is set when microcode +provides the mechanism to invoke a flush of various exploitable CPU buffers +by invoking the VERW instruction. + +Hand it through to guests so they can adjust their mitigations. + +This also requires corresponding qemu changes, which are available +separately. + +[ tglx: Massaged changelog ] + +Signed-off-by: Andi Kleen +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kvm/cpuid.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c +index c07958b59f50..39501e7afdb4 100644 +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -410,7 +410,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, + /* cpuid 7.0.edx*/ + const u32 kvm_cpuid_7_0_edx_x86_features = + F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) | +- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP); ++ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) | ++ F(MD_CLEAR); + + /* all calls to cpuid_count() should be made on the same cpu */ + get_cpu(); +-- +2.21.0 + diff --git a/queue-5.0/0006-x86-speculation-mds-Add-mds_clear_cpu_buffers.patch b/queue-5.0/0006-x86-speculation-mds-Add-mds_clear_cpu_buffers.patch new file mode 100644 index 00000000000..09992b92a47 --- /dev/null +++ b/queue-5.0/0006-x86-speculation-mds-Add-mds_clear_cpu_buffers.patch @@ -0,0 +1,235 @@ +From ccbaead4a038a80682732de5b35841a4184678c5 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 23:13:06 +0100 +Subject: [PATCH 06/27] x86/speculation/mds: Add mds_clear_cpu_buffers() + +commit 6a9e529272517755904b7afa639f6db59ddb793e upstream + +The Microarchitectural Data Sampling (MDS) vulernabilities are mitigated by +clearing the affected CPU buffers. The mechanism for clearing the buffers +uses the unused and obsolete VERW instruction in combination with a +microcode update which triggers a CPU buffer clear when VERW is executed. + +Provide a inline function with the assembly magic. The argument of the VERW +instruction must be a memory operand as documented: + + "MD_CLEAR enumerates that the memory-operand variant of VERW (for + example, VERW m16) has been extended to also overwrite buffers affected + by MDS. This buffer overwriting functionality is not guaranteed for the + register operand variant of VERW." + +Documentation also recommends to use a writable data segment selector: + + "The buffer overwriting occurs regardless of the result of the VERW + permission check, as well as when the selector is null or causes a + descriptor load segment violation. 
However, for lowest latency we + recommend using a selector that indicates a valid writable data + segment." + +Add x86 specific documentation about MDS and the internal workings of the +mitigation. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/index.rst | 1 + + Documentation/x86/conf.py | 10 +++ + Documentation/x86/index.rst | 8 +++ + Documentation/x86/mds.rst | 99 ++++++++++++++++++++++++++++ + arch/x86/include/asm/nospec-branch.h | 25 +++++++ + 5 files changed, 143 insertions(+) + create mode 100644 Documentation/x86/conf.py + create mode 100644 Documentation/x86/index.rst + create mode 100644 Documentation/x86/mds.rst + +diff --git a/Documentation/index.rst b/Documentation/index.rst +index c858c2e66e36..63864826dcd6 100644 +--- a/Documentation/index.rst ++++ b/Documentation/index.rst +@@ -101,6 +101,7 @@ implementation. + :maxdepth: 2 + + sh/index ++ x86/index + + Filesystem Documentation + ------------------------ +diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py +new file mode 100644 +index 000000000000..33c5c3142e20 +--- /dev/null ++++ b/Documentation/x86/conf.py +@@ -0,0 +1,10 @@ ++# -*- coding: utf-8; mode: python -*- ++ ++project = "X86 architecture specific documentation" ++ ++tags.add("subproject") ++ ++latex_documents = [ ++ ('index', 'x86.tex', project, ++ 'The kernel development community', 'manual'), ++] +diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst +new file mode 100644 +index 000000000000..ef389dcf1b1d +--- /dev/null ++++ b/Documentation/x86/index.rst +@@ -0,0 +1,8 @@ ++========================== ++x86 architecture specifics ++========================== ++ ++.. toctree:: ++ :maxdepth: 1 ++ ++ mds +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +new file mode 100644 +index 000000000000..1096738d50f2 +--- /dev/null ++++ b/Documentation/x86/mds.rst +@@ -0,0 +1,99 @@ ++Microarchitectural Data Sampling (MDS) mitigation ++================================================= ++ ++.. _mds: ++ ++Overview ++-------- ++ ++Microarchitectural Data Sampling (MDS) is a family of side channel attacks ++on internal buffers in Intel CPUs. The variants are: ++ ++ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126) ++ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130) ++ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127) ++ ++MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a ++dependent load (store-to-load forwarding) as an optimization. The forward ++can also happen to a faulting or assisting load operation for a different ++memory address, which can be exploited under certain conditions. Store ++buffers are partitioned between Hyper-Threads so cross thread forwarding is ++not possible. But if a thread enters or exits a sleep state the store ++buffer is repartitioned which can expose data from one thread to the other. ++ ++MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage ++L1 miss situations and to hold data which is returned or sent in response ++to a memory or I/O operation. Fill buffers can forward data to a load ++operation and also write data to the cache. 
When the fill buffer is ++deallocated it can retain the stale data of the preceding operations which ++can then be forwarded to a faulting or assisting load operation, which can ++be exploited under certain conditions. Fill buffers are shared between ++Hyper-Threads so cross thread leakage is possible. ++ ++MLPDS leaks Load Port Data. Load ports are used to perform load operations ++from memory or I/O. The received data is then forwarded to the register ++file or a subsequent operation. In some implementations the Load Port can ++contain stale data from a previous operation which can be forwarded to ++faulting or assisting loads under certain conditions, which again can be ++exploited eventually. Load ports are shared between Hyper-Threads so cross ++thread leakage is possible. ++ ++ ++Exposure assumptions ++-------------------- ++ ++It is assumed that attack code resides in user space or in a guest with one ++exception. The rationale behind this assumption is that the code construct ++needed for exploiting MDS requires: ++ ++ - to control the load to trigger a fault or assist ++ ++ - to have a disclosure gadget which exposes the speculatively accessed ++ data for consumption through a side channel. ++ ++ - to control the pointer through which the disclosure gadget exposes the ++ data ++ ++The existence of such a construct in the kernel cannot be excluded with ++100% certainty, but the complexity involved makes it extremly unlikely. ++ ++There is one exception, which is untrusted BPF. The functionality of ++untrusted BPF is limited, but it needs to be thoroughly investigated ++whether it can be used to create such a construct. ++ ++ ++Mitigation strategy ++------------------- ++ ++All variants have the same mitigation strategy at least for the single CPU ++thread case (SMT off): Force the CPU to clear the affected buffers. ++ ++This is achieved by using the otherwise unused and obsolete VERW ++instruction in combination with a microcode update. The microcode clears ++the affected CPU buffers when the VERW instruction is executed. ++ ++For virtualization there are two ways to achieve CPU buffer ++clearing. Either the modified VERW instruction or via the L1D Flush ++command. The latter is issued when L1TF mitigation is enabled so the extra ++VERW can be avoided. If the CPU is not affected by L1TF then VERW needs to ++be issued. ++ ++If the VERW instruction with the supplied segment selector argument is ++executed on a CPU without the microcode update there is no side effect ++other than a small number of pointlessly wasted CPU cycles. ++ ++This does not protect against cross Hyper-Thread attacks except for MSBDS ++which is only exploitable cross Hyper-thread when one of the Hyper-Threads ++enters a C-state. ++ ++The kernel provides a function to invoke the buffer clearing: ++ ++ mds_clear_cpu_buffers() ++ ++The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state ++(idle) transitions. ++ ++According to current knowledge additional mitigations inside the kernel ++itself are not required because the necessary gadgets to expose the leaked ++data cannot be controlled in a way which allows exploitation from malicious ++user space or VM guests. 
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index dad12b767ba0..67cb9b2082b1 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -318,6 +318,31 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + ++#include ++ ++/** ++ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * ++ * This uses the otherwise unused and obsolete VERW instruction in ++ * combination with microcode which triggers a CPU buffer flush when the ++ * instruction is executed. ++ */ ++static inline void mds_clear_cpu_buffers(void) ++{ ++ static const u16 ds = __KERNEL_DS; ++ ++ /* ++ * Has to be the memory-operand variant because only that ++ * guarantees the CPU buffer flush functionality according to ++ * documentation. The register-operand variant does not. ++ * Works with any segment selector, but a valid writable ++ * data segment is the fastest variant. ++ * ++ * "cc" clobber is required because VERW modifies ZF. ++ */ ++ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc"); ++} ++ + #endif /* __ASSEMBLY__ */ + + /* +-- +2.21.0 + diff --git a/queue-5.0/0007-x86-speculation-mds-Clear-CPU-buffers-on-exit-to-use.patch b/queue-5.0/0007-x86-speculation-mds-Clear-CPU-buffers-on-exit-to-use.patch new file mode 100644 index 00000000000..75488bae48b --- /dev/null +++ b/queue-5.0/0007-x86-speculation-mds-Clear-CPU-buffers-on-exit-to-use.patch @@ -0,0 +1,207 @@ +From e031f1053fcf49b960b5089a7f923beb04d56b50 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 23:42:51 +0100 +Subject: [PATCH 07/27] x86/speculation/mds: Clear CPU buffers on exit to user + +commit 04dcbdb8057827b043b3c71aa397c4c63e67d086 upstream + +Add a static key which controls the invocation of the CPU buffer clear +mechanism on exit to user space and add the call into +prepare_exit_to_usermode() and do_nmi() right before actually returning. + +Add documentation which kernel to user space transition this covers and +explain why some corner cases are not mitigated. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/x86/mds.rst | 52 ++++++++++++++++++++++++++++ + arch/x86/entry/common.c | 3 ++ + arch/x86/include/asm/nospec-branch.h | 13 +++++++ + arch/x86/kernel/cpu/bugs.c | 3 ++ + arch/x86/kernel/nmi.c | 4 +++ + arch/x86/kernel/traps.c | 8 +++++ + 6 files changed, 83 insertions(+) + +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 1096738d50f2..54d935bf283b 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -97,3 +97,55 @@ According to current knowledge additional mitigations inside the kernel + itself are not required because the necessary gadgets to expose the leaked + data cannot be controlled in a way which allows exploitation from malicious + user space or VM guests. ++ ++Mitigation points ++----------------- ++ ++1. Return to user space ++^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ When transitioning from kernel to user space the CPU buffers are flushed ++ on affected CPUs when the mitigation is not disabled on the kernel ++ command line. The migitation is enabled through the static key ++ mds_user_clear. 
++ ++ The mitigation is invoked in prepare_exit_to_usermode() which covers ++ most of the kernel to user space transitions. There are a few exceptions ++ which are not invoking prepare_exit_to_usermode() on return to user ++ space. These exceptions use the paranoid exit code. ++ ++ - Non Maskable Interrupt (NMI): ++ ++ Access to sensible data like keys, credentials in the NMI context is ++ mostly theoretical: The CPU can do prefetching or execute a ++ misspeculated code path and thereby fetching data which might end up ++ leaking through a buffer. ++ ++ But for mounting other attacks the kernel stack address of the task is ++ already valuable information. So in full mitigation mode, the NMI is ++ mitigated on the return from do_nmi() to provide almost complete ++ coverage. ++ ++ - Double fault (#DF): ++ ++ A double fault is usually fatal, but the ESPFIX workaround, which can ++ be triggered from user space through modify_ldt(2) is a recoverable ++ double fault. #DF uses the paranoid exit path, so explicit mitigation ++ in the double fault handler is required. ++ ++ - Machine Check Exception (#MC): ++ ++ Another corner case is a #MC which hits between the CPU buffer clear ++ invocation and the actual return to user. As this still is in kernel ++ space it takes the paranoid exit path which does not clear the CPU ++ buffers. So the #MC handler repopulates the buffers to some ++ extent. Machine checks are not reliably controllable and the window is ++ extremly small so mitigation would just tick a checkbox that this ++ theoretical corner case is covered. To keep the amount of special ++ cases small, ignore #MC. ++ ++ - Debug Exception (#DB): ++ ++ This takes the paranoid exit path only when the INT1 breakpoint is in ++ kernel space. #DB on a user space address takes the regular exit path, ++ so no extra mitigation required. 
+diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c +index 7bc105f47d21..19f650d729f5 100644 +--- a/arch/x86/entry/common.c ++++ b/arch/x86/entry/common.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -212,6 +213,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs) + #endif + + user_enter_irqoff(); ++ ++ mds_user_clear_cpu_buffers(); + } + + #define SYSCALL_EXIT_WORK_FLAGS \ +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 67cb9b2082b1..65b747286d96 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -318,6 +318,8 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + ++DECLARE_STATIC_KEY_FALSE(mds_user_clear); ++ + #include + + /** +@@ -343,6 +345,17 @@ static inline void mds_clear_cpu_buffers(void) + asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc"); + } + ++/** ++ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * ++ * Clear CPU buffers if the corresponding static key is enabled ++ */ ++static inline void mds_user_clear_cpu_buffers(void) ++{ ++ if (static_branch_likely(&mds_user_clear)) ++ mds_clear_cpu_buffers(); ++} ++ + #endif /* __ASSEMBLY__ */ + + /* +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 482383c2b184..c2cfd45f709e 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -63,6 +63,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + /* Control unconditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + ++/* Control MDS CPU buffer clear before returning to user space */ ++DEFINE_STATIC_KEY_FALSE(mds_user_clear); ++ + void __init check_bugs(void) + { + identify_boot_cpu(); +diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c +index 18bc9b51ac9b..086cf1d1d71d 100644 +--- a/arch/x86/kernel/nmi.c ++++ b/arch/x86/kernel/nmi.c +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #define CREATE_TRACE_POINTS + #include +@@ -533,6 +534,9 @@ do_nmi(struct pt_regs *regs, long error_code) + write_cr2(this_cpu_read(nmi_cr2)); + if (this_cpu_dec_return(nmi_state)) + goto nmi_restart; ++ ++ if (user_mode(regs)) ++ mds_user_clear_cpu_buffers(); + } + NOKPROBE_SYMBOL(do_nmi); + +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index 9b7c4ca8f0a7..85fe1870f873 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -58,6 +58,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -366,6 +367,13 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) + regs->ip = (unsigned long)general_protection; + regs->sp = (unsigned long)&gpregs->orig_ax; + ++ /* ++ * This situation can be triggered by userspace via ++ * modify_ldt(2) and the return does not take the regular ++ * user space exit, so a CPU buffer clear is required when ++ * MDS mitigation is enabled. 
++ */ ++ mds_user_clear_cpu_buffers(); + return; + } + #endif +-- +2.21.0 + diff --git a/queue-5.0/0008-x86-kvm-vmx-Add-MDS-protection-when-L1D-Flush-is-not.patch b/queue-5.0/0008-x86-kvm-vmx-Add-MDS-protection-when-L1D-Flush-is-not.patch new file mode 100644 index 00000000000..e7cb762e097 --- /dev/null +++ b/queue-5.0/0008-x86-kvm-vmx-Add-MDS-protection-when-L1D-Flush-is-not.patch @@ -0,0 +1,60 @@ +From 91c9e9a8d6a0187870a7c9e8284be38423318a08 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Wed, 27 Feb 2019 12:48:14 +0100 +Subject: [PATCH 08/27] x86/kvm/vmx: Add MDS protection when L1D Flush is not + active + +commit 650b68a0622f933444a6d66936abb3103029413b upstream + +CPUs which are affected by L1TF and MDS mitigate MDS with the L1D Flush on +VMENTER when updated microcode is installed. + +If a CPU is not affected by L1TF or if the L1D Flush is not in use, then +MDS mitigation needs to be invoked explicitly. + +For these cases, follow the host mitigation state and invoke the MDS +mitigation before VMENTER. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 1 + + arch/x86/kvm/vmx/vmx.c | 3 +++ + 2 files changed, 4 insertions(+) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index c2cfd45f709e..bef397b4c2f8 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -65,6 +65,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + + /* Control MDS CPU buffer clear before returning to user space */ + DEFINE_STATIC_KEY_FALSE(mds_user_clear); ++EXPORT_SYMBOL_GPL(mds_user_clear); + + void __init check_bugs(void) + { +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index da6fdd5434a1..dadb6a6a9b2a 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -6356,8 +6356,11 @@ static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) + evmcs_rsp = static_branch_unlikely(&enable_evmcs) ? + (unsigned long)¤t_evmcs->host_rsp : 0; + ++ /* L1D Flush includes CPU buffer clear to mitigate MDS */ + if (static_branch_unlikely(&vmx_l1d_should_flush)) + vmx_l1d_flush(vcpu); ++ else if (static_branch_unlikely(&mds_user_clear)) ++ mds_clear_cpu_buffers(); + + asm( + /* Store host registers */ +-- +2.21.0 + diff --git a/queue-5.0/0009-x86-speculation-mds-Conditionally-clear-CPU-buffers-.patch b/queue-5.0/0009-x86-speculation-mds-Conditionally-clear-CPU-buffers-.patch new file mode 100644 index 00000000000..b6c43eead79 --- /dev/null +++ b/queue-5.0/0009-x86-speculation-mds-Conditionally-clear-CPU-buffers-.patch @@ -0,0 +1,227 @@ +From c13d5738b6681167eaab57fbf66fa20b9ee819c8 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 23:04:01 +0100 +Subject: [PATCH 09/27] x86/speculation/mds: Conditionally clear CPU buffers on + idle entry + +commit 07f07f55a29cb705e221eda7894dd67ab81ef343 upstream + +Add a static key which controls the invocation of the CPU buffer clear +mechanism on idle entry. This is independent of other MDS mitigations +because the idle entry invocation to mitigate the potential leakage due to +store buffer repartitioning is only necessary on SMT systems. + +Add the actual invocations to the different halt/mwait variants which +covers all usage sites. mwaitx is not patched as it's not available on +Intel CPUs. 
+ +The buffer clear is only invoked before entering the C-State to prevent +that stale data from the idling CPU is spilled to the Hyper-Thread sibling +after the Store buffer got repartitioned and all entries are available to +the non idle sibling. + +When coming out of idle the store buffer is partitioned again so each +sibling has half of it available. Now CPU which returned from idle could be +speculatively exposed to contents of the sibling, but the buffers are +flushed either on exit to user space or on VMENTER. + +When later on conditional buffer clearing is implemented on top of this, +then there is no action required either because before returning to user +space the context switch will set the condition flag which causes a flush +on the return to user path. + +Note, that the buffer clearing on idle is only sensible on CPUs which are +solely affected by MSBDS and not any other variant of MDS because the other +MDS variants cannot be mitigated when SMT is enabled, so the buffer +clearing on idle would be a window dressing exercise. + +This intentionally does not handle the case in the acpi/processor_idle +driver which uses the legacy IO port interface for C-State transitions for +two reasons: + + - The acpi/processor_idle driver was replaced by the intel_idle driver + almost a decade ago. Anything Nehalem upwards supports it and defaults + to that new driver. + + - The legacy IO port interface is likely to be used on older and therefore + unaffected CPUs or on systems which do not receive microcode updates + anymore, so there is no point in adding that. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Frederic Weisbecker +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/x86/mds.rst | 42 ++++++++++++++++++++++++++++ + arch/x86/include/asm/irqflags.h | 4 +++ + arch/x86/include/asm/mwait.h | 7 +++++ + arch/x86/include/asm/nospec-branch.h | 12 ++++++++ + arch/x86/kernel/cpu/bugs.c | 3 ++ + 5 files changed, 68 insertions(+) + +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 54d935bf283b..87ce8ac9f36e 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -149,3 +149,45 @@ Mitigation points + This takes the paranoid exit path only when the INT1 breakpoint is in + kernel space. #DB on a user space address takes the regular exit path, + so no extra mitigation required. ++ ++ ++2. C-State transition ++^^^^^^^^^^^^^^^^^^^^^ ++ ++ When a CPU goes idle and enters a C-State the CPU buffers need to be ++ cleared on affected CPUs when SMT is active. This addresses the ++ repartitioning of the store buffer when one of the Hyper-Threads enters ++ a C-State. ++ ++ When SMT is inactive, i.e. either the CPU does not support it or all ++ sibling threads are offline CPU buffer clearing is not required. ++ ++ The idle clearing is enabled on CPUs which are only affected by MSBDS ++ and not by any other MDS variant. The other MDS variants cannot be ++ protected against cross Hyper-Thread attacks because the Fill Buffer and ++ the Load Ports are shared. So on CPUs affected by other variants, the ++ idle clearing would be a window dressing exercise and is therefore not ++ activated. ++ ++ The invocation is controlled by the static key mds_idle_clear which is ++ switched depending on the chosen mitigation mode and the SMT state of ++ the system. 
++ ++ The buffer clear is only invoked before entering the C-State to prevent ++ that stale data from the idling CPU from spilling to the Hyper-Thread ++ sibling after the store buffer got repartitioned and all entries are ++ available to the non idle sibling. ++ ++ When coming out of idle the store buffer is partitioned again so each ++ sibling has half of it available. The back from idle CPU could be then ++ speculatively exposed to contents of the sibling. The buffers are ++ flushed either on exit to user space or on VMENTER so malicious code ++ in user space or the guest cannot speculatively access them. ++ ++ The mitigation is hooked into all variants of halt()/mwait(), but does ++ not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver ++ has been superseded by the intel_idle driver around 2010 and is ++ preferred on all affected CPUs which are expected to gain the MD_CLEAR ++ functionality in microcode. Aside of that the IO-Port mechanism is a ++ legacy interface which is only used on older systems which are either ++ not affected or do not receive microcode updates anymore. +diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h +index 058e40fed167..8a0e56e1dcc9 100644 +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -6,6 +6,8 @@ + + #ifndef __ASSEMBLY__ + ++#include ++ + /* Provide __cpuidle; we can't safely include */ + #define __cpuidle __attribute__((__section__(".cpuidle.text"))) + +@@ -54,11 +56,13 @@ static inline void native_irq_enable(void) + + static inline __cpuidle void native_safe_halt(void) + { ++ mds_idle_clear_cpu_buffers(); + asm volatile("sti; hlt": : :"memory"); + } + + static inline __cpuidle void native_halt(void) + { ++ mds_idle_clear_cpu_buffers(); + asm volatile("hlt": : :"memory"); + } + +diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h +index 39a2fb29378a..eb0f80ce8524 100644 +--- a/arch/x86/include/asm/mwait.h ++++ b/arch/x86/include/asm/mwait.h +@@ -6,6 +6,7 @@ + #include + + #include ++#include + + #define MWAIT_SUBSTATE_MASK 0xf + #define MWAIT_CSTATE_MASK 0xf +@@ -40,6 +41,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx, + + static inline void __mwait(unsigned long eax, unsigned long ecx) + { ++ mds_idle_clear_cpu_buffers(); ++ + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +@@ -74,6 +77,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) + static inline void __mwaitx(unsigned long eax, unsigned long ebx, + unsigned long ecx) + { ++ /* No MDS buffer clear as this is AMD/HYGON only */ ++ + /* "mwaitx %eax, %ebx, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfb;" + :: "a" (eax), "b" (ebx), "c" (ecx)); +@@ -81,6 +86,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx, + + static inline void __sti_mwait(unsigned long eax, unsigned long ecx) + { ++ mds_idle_clear_cpu_buffers(); ++ + trace_hardirqs_on(); + /* "mwait %eax, %ecx;" */ + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h +index 65b747286d96..4e970390110f 100644 +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -319,6 +319,7 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + + DECLARE_STATIC_KEY_FALSE(mds_user_clear); ++DECLARE_STATIC_KEY_FALSE(mds_idle_clear); + + #include + +@@ -356,6 +357,17 @@ static inline void 
mds_user_clear_cpu_buffers(void) + mds_clear_cpu_buffers(); + } + ++/** ++ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * ++ * Clear CPU buffers if the corresponding static key is enabled ++ */ ++static inline void mds_idle_clear_cpu_buffers(void) ++{ ++ if (static_branch_likely(&mds_idle_clear)) ++ mds_clear_cpu_buffers(); ++} ++ + #endif /* __ASSEMBLY__ */ + + /* +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index bef397b4c2f8..10d309f99f11 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -66,6 +66,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + /* Control MDS CPU buffer clear before returning to user space */ + DEFINE_STATIC_KEY_FALSE(mds_user_clear); + EXPORT_SYMBOL_GPL(mds_user_clear); ++/* Control MDS CPU buffer clear before idling (halt, mwait) */ ++DEFINE_STATIC_KEY_FALSE(mds_idle_clear); ++EXPORT_SYMBOL_GPL(mds_idle_clear); + + void __init check_bugs(void) + { +-- +2.21.0 + diff --git a/queue-5.0/0010-x86-speculation-mds-Add-mitigation-control-for-MDS.patch b/queue-5.0/0010-x86-speculation-mds-Add-mitigation-control-for-MDS.patch new file mode 100644 index 00000000000..a4028c5cdfb --- /dev/null +++ b/queue-5.0/0010-x86-speculation-mds-Add-mitigation-control-for-MDS.patch @@ -0,0 +1,195 @@ +From 7b94c822616919ced2f176b507b031c019ec4ac4 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 22:04:08 +0100 +Subject: [PATCH 10/27] x86/speculation/mds: Add mitigation control for MDS + +commit bc1241700acd82ec69fde98c5763ce51086269f8 upstream + +Now that the mitigations are in place, add a command line parameter to +control the mitigation, a mitigation selector function and a SMT update +mechanism. + +This is the minimal straight forward initial implementation which just +provides an always on/off mode. The command line parameter is: + + mds=[full|off] + +This is consistent with the existing mitigations for other speculative +hardware vulnerabilities. + +The idle invocation is dynamically updated according to the SMT state of +the system similar to the dynamic update of the STIBP mitigation. The idle +mitigation is limited to CPUs which are only affected by MSBDS and not any +other variant, because the other variants cannot be mitigated on SMT +enabled systems. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + .../admin-guide/kernel-parameters.txt | 22 ++++++ + arch/x86/include/asm/processor.h | 5 ++ + arch/x86/kernel/cpu/bugs.c | 70 +++++++++++++++++++ + 3 files changed, 97 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 858b6c0b9a15..dddb024eb523 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2356,6 +2356,28 @@ + Format: , + Specifies range of consoles to be captured by the MDA. + ++ mds= [X86,INTEL] ++ Control mitigation for the Micro-architectural Data ++ Sampling (MDS) vulnerability. ++ ++ Certain CPUs are vulnerable to an exploit against CPU ++ internal buffers which can forward information to a ++ disclosure gadget under certain conditions. ++ ++ In vulnerable processors, the speculatively ++ forwarded data can be used in a cache side channel ++ attack, to access data to which the attacker does ++ not have direct access. ++ ++ This parameter controls the MDS mitigation. 
The ++ options are: ++ ++ full - Enable MDS mitigation on vulnerable CPUs ++ off - Unconditionally disable MDS mitigation ++ ++ Not specifying this option is equivalent to ++ mds=full. ++ + mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory + Amount of memory to be used when the kernel is not able + to see the whole system memory or for test. +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 33051436c864..1f0295783325 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -992,4 +992,9 @@ enum l1tf_mitigations { + + extern enum l1tf_mitigations l1tf_mitigation; + ++enum mds_mitigations { ++ MDS_MITIGATION_OFF, ++ MDS_MITIGATION_FULL, ++}; ++ + #endif /* _ASM_X86_PROCESSOR_H */ +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 10d309f99f11..90f102c85a29 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -37,6 +37,7 @@ + static void __init spectre_v2_select_mitigation(void); + static void __init ssb_select_mitigation(void); + static void __init l1tf_select_mitigation(void); ++static void __init mds_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR that always has to be preserved. */ + u64 x86_spec_ctrl_base; +@@ -108,6 +109,8 @@ void __init check_bugs(void) + + l1tf_select_mitigation(); + ++ mds_select_mitigation(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. +@@ -213,6 +216,50 @@ static void x86_amd_ssb_disable(void) + wrmsrl(MSR_AMD64_LS_CFG, msrval); + } + ++#undef pr_fmt ++#define pr_fmt(fmt) "MDS: " fmt ++ ++/* Default mitigation for L1TF-affected CPUs */ ++static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; ++ ++static const char * const mds_strings[] = { ++ [MDS_MITIGATION_OFF] = "Vulnerable", ++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers" ++}; ++ ++static void __init mds_select_mitigation(void) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_MDS)) { ++ mds_mitigation = MDS_MITIGATION_OFF; ++ return; ++ } ++ ++ if (mds_mitigation == MDS_MITIGATION_FULL) { ++ if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) ++ static_branch_enable(&mds_user_clear); ++ else ++ mds_mitigation = MDS_MITIGATION_OFF; ++ } ++ pr_info("%s\n", mds_strings[mds_mitigation]); ++} ++ ++static int __init mds_cmdline(char *str) ++{ ++ if (!boot_cpu_has_bug(X86_BUG_MDS)) ++ return 0; ++ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) ++ mds_mitigation = MDS_MITIGATION_OFF; ++ else if (!strcmp(str, "full")) ++ mds_mitigation = MDS_MITIGATION_FULL; ++ ++ return 0; ++} ++early_param("mds", mds_cmdline); ++ + #undef pr_fmt + #define pr_fmt(fmt) "Spectre V2 : " fmt + +@@ -617,6 +664,26 @@ static void update_indir_branch_cond(void) + static_branch_disable(&switch_to_cond_stibp); + } + ++/* Update the static key controlling the MDS CPU buffer clear in idle */ ++static void update_mds_branch_idle(void) ++{ ++ /* ++ * Enable the idle clearing if SMT is active on CPUs which are ++ * affected only by MSBDS and not any other MDS variant. ++ * ++ * The other variants cannot be mitigated when SMT is enabled, so ++ * clearing the buffers on idle just to prevent the Store Buffer ++ * repartitioning leak would be a window dressing exercise. 
++ */ ++ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) ++ return; ++ ++ if (sched_smt_active()) ++ static_branch_enable(&mds_idle_clear); ++ else ++ static_branch_disable(&mds_idle_clear); ++} ++ + void arch_smt_update(void) + { + /* Enhanced IBRS implies STIBP. No update required. */ +@@ -638,6 +705,9 @@ void arch_smt_update(void) + break; + } + ++ if (mds_mitigation == MDS_MITIGATION_FULL) ++ update_mds_branch_idle(); ++ + mutex_unlock(&spec_ctrl_mutex); + } + +-- +2.21.0 + diff --git a/queue-5.0/0011-x86-speculation-mds-Add-sysfs-reporting-for-MDS.patch b/queue-5.0/0011-x86-speculation-mds-Add-sysfs-reporting-for-MDS.patch new file mode 100644 index 00000000000..58e2c8a5bd6 --- /dev/null +++ b/queue-5.0/0011-x86-speculation-mds-Add-sysfs-reporting-for-MDS.patch @@ -0,0 +1,131 @@ +From d73522ef5e741a0c239eaa5451ea8cc24ff5baaa Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Mon, 18 Feb 2019 22:51:43 +0100 +Subject: [PATCH 11/27] x86/speculation/mds: Add sysfs reporting for MDS + +commit 8a4b06d391b0a42a373808979b5028f5c84d9c6a upstream + +Add the sysfs reporting file for MDS. It exposes the vulnerability and +mitigation state similar to the existing files for the other speculative +hardware vulnerabilities. + +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + .../ABI/testing/sysfs-devices-system-cpu | 1 + + arch/x86/kernel/cpu/bugs.c | 25 +++++++++++++++++++ + drivers/base/cpu.c | 8 ++++++ + include/linux/cpu.h | 2 ++ + 4 files changed, 36 insertions(+) + +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 9605dbd4b5b5..2db5c3407fd6 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -484,6 +484,7 @@ What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/spectre_v2 + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass + /sys/devices/system/cpu/vulnerabilities/l1tf ++ /sys/devices/system/cpu/vulnerabilities/mds + Date: January 2018 + Contact: Linux kernel mailing list + Description: Information about CPU vulnerabilities +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 90f102c85a29..60eab526f98d 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1172,6 +1172,22 @@ static ssize_t l1tf_show_state(char *buf) + } + #endif + ++static ssize_t mds_show_state(char *buf) ++{ ++ if (!hypervisor_is_type(X86_HYPER_NATIVE)) { ++ return sprintf(buf, "%s; SMT Host state unknown\n", ++ mds_strings[mds_mitigation]); ++ } ++ ++ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { ++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], ++ sched_smt_active() ? "mitigated" : "disabled"); ++ } ++ ++ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], ++ sched_smt_active() ? 
"vulnerable" : "disabled"); ++} ++ + static char *stibp_state(void) + { + if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) +@@ -1238,6 +1254,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr + if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) + return l1tf_show_state(buf); + break; ++ ++ case X86_BUG_MDS: ++ return mds_show_state(buf); ++ + default: + break; + } +@@ -1269,4 +1289,9 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b + { + return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); + } ++ ++ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_MDS); ++} + #endif +diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c +index eb9443d5bae1..2fd6ca1021c2 100644 +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -546,11 +546,18 @@ ssize_t __weak cpu_show_l1tf(struct device *dev, + return sprintf(buf, "Not affected\n"); + } + ++ssize_t __weak cpu_show_mds(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "Not affected\n"); ++} ++ + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); + static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); + static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); + static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL); + static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL); ++static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -558,6 +565,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_spectre_v2.attr, + &dev_attr_spec_store_bypass.attr, + &dev_attr_l1tf.attr, ++ &dev_attr_mds.attr, + NULL + }; + +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 5041357d0297..3c87ad888ed3 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -57,6 +57,8 @@ extern ssize_t cpu_show_spec_store_bypass(struct device *dev, + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_l1tf(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_mds(struct device *dev, ++ struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, +-- +2.21.0 + diff --git a/queue-5.0/0012-x86-speculation-mds-Add-mitigation-mode-VMWERV.patch b/queue-5.0/0012-x86-speculation-mds-Add-mitigation-mode-VMWERV.patch new file mode 100644 index 00000000000..d08df72b7bb --- /dev/null +++ b/queue-5.0/0012-x86-speculation-mds-Add-mitigation-mode-VMWERV.patch @@ -0,0 +1,133 @@ +From 1bd4c08ad894489c4e5c6ff3d55fd7518e3b9cc6 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Wed, 20 Feb 2019 09:40:40 +0100 +Subject: [PATCH 12/27] x86/speculation/mds: Add mitigation mode VMWERV + +commit 22dd8365088b6403630b82423cf906491859b65e upstream + +In virtualized environments it can happen that the host has the microcode +update which utilizes the VERW instruction to clear CPU buffers, but the +hypervisor is not yet updated to expose the X86_FEATURE_MD_CLEAR CPUID bit +to guests. + +Introduce an internal mitigation mode VMWERV which enables the invocation +of the CPU buffer clearing even if X86_FEATURE_MD_CLEAR is not set. If the +system has no updated microcode this results in a pointless execution of +the VERW instruction wasting a few CPU cycles. 
If the microcode is updated, +but not exposed to a guest then the CPU buffers will be cleared. + +That said: Virtual Machines Will Eventually Receive Vaccine + +Signed-off-by: Thomas Gleixner +Reviewed-by: Borislav Petkov +Reviewed-by: Jon Masters +Tested-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/x86/mds.rst | 27 +++++++++++++++++++++++++++ + arch/x86/include/asm/processor.h | 1 + + arch/x86/kernel/cpu/bugs.c | 18 ++++++++++++------ + 3 files changed, 40 insertions(+), 6 deletions(-) + +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 87ce8ac9f36e..3d6f943f1afb 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -93,11 +93,38 @@ The kernel provides a function to invoke the buffer clearing: + The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state + (idle) transitions. + ++As a special quirk to address virtualization scenarios where the host has ++the microcode updated, but the hypervisor does not (yet) expose the ++MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the ++hope that it might actually clear the buffers. The state is reflected ++accordingly. ++ + According to current knowledge additional mitigations inside the kernel + itself are not required because the necessary gadgets to expose the leaked + data cannot be controlled in a way which allows exploitation from malicious + user space or VM guests. + ++Kernel internal mitigation modes ++-------------------------------- ++ ++ ======= ============================================================ ++ off Mitigation is disabled. Either the CPU is not affected or ++ mds=off is supplied on the kernel command line ++ ++ full Mitigation is eanbled. CPU is affected and MD_CLEAR is ++ advertised in CPUID. ++ ++ vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not ++ advertised in CPUID. That is mainly for virtualization ++ scenarios where the host has the updated microcode but the ++ hypervisor does not expose MD_CLEAR in CPUID. It's a best ++ effort approach without guarantee. ++ ======= ============================================================ ++ ++If the CPU is affected and mds=off is not supplied on the kernel command ++line then the kernel selects the appropriate mitigation mode depending on ++the availability of the MD_CLEAR CPUID bit. 
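As a stand-alone illustration of the selection logic summarised above (hypothetical helper parameters, not the kernel's own code), the decision reduces to:

    enum mds_mode { MDS_MODE_OFF, MDS_MODE_FULL, MDS_MODE_VMWERV };

    /*
     * Illustrative sketch only: cpu_affected, md_clear_in_cpuid and
     * cmdline_off stand in for the real CPU bug, CPUID and command
     * line checks.
     */
    static enum mds_mode pick_mds_mode(int cpu_affected,
                                       int md_clear_in_cpuid,
                                       int cmdline_off)
    {
            if (!cpu_affected || cmdline_off)
                    return MDS_MODE_OFF;      /* not affected, or mds=off */
            if (md_clear_in_cpuid)
                    return MDS_MODE_FULL;     /* updated microcode advertised */
            return MDS_MODE_VMWERV;           /* best effort VERW, no guarantee */
    }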
++ + Mitigation points + ----------------- + +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 1f0295783325..aca1ef8cc79f 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -995,6 +995,7 @@ extern enum l1tf_mitigations l1tf_mitigation; + enum mds_mitigations { + MDS_MITIGATION_OFF, + MDS_MITIGATION_FULL, ++ MDS_MITIGATION_VMWERV, + }; + + #endif /* _ASM_X86_PROCESSOR_H */ +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 60eab526f98d..12ab114b294e 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -224,7 +224,8 @@ static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL + + static const char * const mds_strings[] = { + [MDS_MITIGATION_OFF] = "Vulnerable", +- [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers" ++ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", ++ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode", + }; + + static void __init mds_select_mitigation(void) +@@ -235,10 +236,9 @@ static void __init mds_select_mitigation(void) + } + + if (mds_mitigation == MDS_MITIGATION_FULL) { +- if (boot_cpu_has(X86_FEATURE_MD_CLEAR)) +- static_branch_enable(&mds_user_clear); +- else +- mds_mitigation = MDS_MITIGATION_OFF; ++ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) ++ mds_mitigation = MDS_MITIGATION_VMWERV; ++ static_branch_enable(&mds_user_clear); + } + pr_info("%s\n", mds_strings[mds_mitigation]); + } +@@ -705,8 +705,14 @@ void arch_smt_update(void) + break; + } + +- if (mds_mitigation == MDS_MITIGATION_FULL) ++ switch (mds_mitigation) { ++ case MDS_MITIGATION_FULL: ++ case MDS_MITIGATION_VMWERV: + update_mds_branch_idle(); ++ break; ++ case MDS_MITIGATION_OFF: ++ break; ++ } + + mutex_unlock(&spec_ctrl_mutex); + } +-- +2.21.0 + diff --git a/queue-5.0/0013-Documentation-Move-L1TF-to-separate-directory.patch b/queue-5.0/0013-Documentation-Move-L1TF-to-separate-directory.patch new file mode 100644 index 00000000000..3d9dbbc260f --- /dev/null +++ b/queue-5.0/0013-Documentation-Move-L1TF-to-separate-directory.patch @@ -0,0 +1,127 @@ +From a4e152e31bda2e991116a478101fa0f403398036 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Tue, 19 Feb 2019 11:10:49 +0100 +Subject: [PATCH 13/27] Documentation: Move L1TF to separate directory + +commit 65fd4cb65b2dad97feb8330b6690445910b56d6a upstream + +Move L!TF to a separate directory so the MDS stuff can be added at the +side. Otherwise the all hardware vulnerabilites have their own top level +entry. Should have done that right away. 
+ +Signed-off-by: Thomas Gleixner +Reviewed-by: Greg Kroah-Hartman +Reviewed-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/ABI/testing/sysfs-devices-system-cpu | 2 +- + Documentation/admin-guide/hw-vuln/index.rst | 12 ++++++++++++ + Documentation/admin-guide/{ => hw-vuln}/l1tf.rst | 0 + Documentation/admin-guide/index.rst | 6 ++---- + Documentation/admin-guide/kernel-parameters.txt | 2 +- + arch/x86/kernel/cpu/bugs.c | 2 +- + arch/x86/kvm/vmx/vmx.c | 4 ++-- + 7 files changed, 19 insertions(+), 9 deletions(-) + create mode 100644 Documentation/admin-guide/hw-vuln/index.rst + rename Documentation/admin-guide/{ => hw-vuln}/l1tf.rst (100%) + +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 2db5c3407fd6..744c6d764b0c 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -498,7 +498,7 @@ Description: Information about CPU vulnerabilities + "Mitigation: $M" CPU is affected and mitigation $M is in effect + + Details about the l1tf file can be found in +- Documentation/admin-guide/l1tf.rst ++ Documentation/admin-guide/hw-vuln/l1tf.rst + + What: /sys/devices/system/cpu/smt + /sys/devices/system/cpu/smt/active +diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst +new file mode 100644 +index 000000000000..8ce2009f1981 +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -0,0 +1,12 @@ ++======================== ++Hardware vulnerabilities ++======================== ++ ++This section describes CPU vulnerabilities and provides an overview of the ++possible mitigations along with guidance for selecting mitigations if they ++are configurable at compile, boot or run time. ++ ++.. toctree:: ++ :maxdepth: 1 ++ ++ l1tf +diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/hw-vuln/l1tf.rst +similarity index 100% +rename from Documentation/admin-guide/l1tf.rst +rename to Documentation/admin-guide/hw-vuln/l1tf.rst +diff --git a/Documentation/admin-guide/index.rst b/Documentation/admin-guide/index.rst +index 0a491676685e..42247516962a 100644 +--- a/Documentation/admin-guide/index.rst ++++ b/Documentation/admin-guide/index.rst +@@ -17,14 +17,12 @@ etc. + kernel-parameters + devices + +-This section describes CPU vulnerabilities and provides an overview of the +-possible mitigations along with guidance for selecting mitigations if they +-are configurable at compile, boot or run time. ++This section describes CPU vulnerabilities and their mitigations. + + .. toctree:: + :maxdepth: 1 + +- l1tf ++ hw-vuln/index + + Here is a set of documents aimed at users who are trying to track down + problems and bugs in particular. +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index dddb024eb523..9afcb240a673 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2114,7 +2114,7 @@ + + Default is 'flush'. 
+ +- For details see: Documentation/admin-guide/l1tf.rst ++ For details see: Documentation/admin-guide/hw-vuln/l1tf.rst + + l2cr= [PPC] + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 12ab114b294e..bcc110a85830 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1107,7 +1107,7 @@ static void __init l1tf_select_mitigation(void) + pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", + half_pa); + pr_info("However, doing so will make a part of your RAM unusable.\n"); +- pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n"); ++ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); + return; + } + +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index dadb6a6a9b2a..df6e325b288b 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -6800,8 +6800,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) + return ERR_PTR(err); + } + +-#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" +-#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n" ++#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" ++#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html for details.\n" + + static int vmx_vm_init(struct kvm *kvm) + { +-- +2.21.0 + diff --git a/queue-5.0/0014-Documentation-Add-MDS-vulnerability-documentation.patch b/queue-5.0/0014-Documentation-Add-MDS-vulnerability-documentation.patch new file mode 100644 index 00000000000..281e4f199c0 --- /dev/null +++ b/queue-5.0/0014-Documentation-Add-MDS-vulnerability-documentation.patch @@ -0,0 +1,385 @@ +From 345912eeaef04d2fc0b9168e49d7e59ba6eb2487 Mon Sep 17 00:00:00 2001 +From: Thomas Gleixner +Date: Tue, 19 Feb 2019 00:02:31 +0100 +Subject: [PATCH 14/27] Documentation: Add MDS vulnerability documentation + +commit 5999bbe7a6ea3c62029532ec84dc06003a1fa258 upstream + +Add the initial MDS vulnerability documentation. 
+ +Signed-off-by: Thomas Gleixner +Reviewed-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + .../ABI/testing/sysfs-devices-system-cpu | 3 +- + Documentation/admin-guide/hw-vuln/index.rst | 1 + + Documentation/admin-guide/hw-vuln/l1tf.rst | 1 + + Documentation/admin-guide/hw-vuln/mds.rst | 307 ++++++++++++++++++ + .../admin-guide/kernel-parameters.txt | 2 + + 5 files changed, 312 insertions(+), 2 deletions(-) + create mode 100644 Documentation/admin-guide/hw-vuln/mds.rst + +diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu +index 744c6d764b0c..141a7bb58b80 100644 +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -497,8 +497,7 @@ Description: Information about CPU vulnerabilities + "Vulnerable" CPU is affected and no mitigation in effect + "Mitigation: $M" CPU is affected and mitigation $M is in effect + +- Details about the l1tf file can be found in +- Documentation/admin-guide/hw-vuln/l1tf.rst ++ See also: Documentation/admin-guide/hw-vuln/index.rst + + What: /sys/devices/system/cpu/smt + /sys/devices/system/cpu/smt/active +diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst +index 8ce2009f1981..ffc064c1ec68 100644 +--- a/Documentation/admin-guide/hw-vuln/index.rst ++++ b/Documentation/admin-guide/hw-vuln/index.rst +@@ -10,3 +10,4 @@ are configurable at compile, boot or run time. + :maxdepth: 1 + + l1tf ++ mds +diff --git a/Documentation/admin-guide/hw-vuln/l1tf.rst b/Documentation/admin-guide/hw-vuln/l1tf.rst +index 9af977384168..31653a9f0e1b 100644 +--- a/Documentation/admin-guide/hw-vuln/l1tf.rst ++++ b/Documentation/admin-guide/hw-vuln/l1tf.rst +@@ -445,6 +445,7 @@ The default is 'cond'. If 'l1tf=full,force' is given on the kernel command + line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush + module parameter is ignored and writes to the sysfs file are rejected. + ++.. _mitigation_selection: + + Mitigation selection guide + -------------------------- +diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst +new file mode 100644 +index 000000000000..1de29d28903d +--- /dev/null ++++ b/Documentation/admin-guide/hw-vuln/mds.rst +@@ -0,0 +1,307 @@ ++MDS - Microarchitectural Data Sampling ++====================================== ++ ++Microarchitectural Data Sampling is a hardware vulnerability which allows ++unprivileged speculative access to data which is available in various CPU ++internal buffers. ++ ++Affected processors ++------------------- ++ ++This vulnerability affects a wide range of Intel processors. The ++vulnerability is not present on: ++ ++ - Processors from AMD, Centaur and other non Intel vendors ++ ++ - Older processor models, where the CPU family is < 6 ++ ++ - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus) ++ ++ - Intel processors which have the ARCH_CAP_MDS_NO bit set in the ++ IA32_ARCH_CAPABILITIES MSR. ++ ++Whether a processor is affected or not can be read out from the MDS ++vulnerability file in sysfs. See :ref:`mds_sys_info`. ++ ++Not all processors are affected by all variants of MDS, but the mitigation ++is identical for all of them so the kernel treats them as a single ++vulnerability. 
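For a quick check from user space, a minimal program reading the sysfs file mentioned above might look like this (illustrative only; reading the same path with cat gives the same result):

    /* Illustrative sketch: print the MDS status exposed via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mds", "r");

            if (!f) {
                    /* Absent on kernels without these patches. */
                    perror("mds");
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }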
++ ++Related CVEs ++------------ ++ ++The following CVE entries are related to the MDS vulnerability: ++ ++ ============== ===== ============================================== ++ CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling ++ CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling ++ CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling ++ ============== ===== ============================================== ++ ++Problem ++------- ++ ++When performing store, load, L1 refill operations, processors write data ++into temporary microarchitectural structures (buffers). The data in the ++buffer can be forwarded to load operations as an optimization. ++ ++Under certain conditions, usually a fault/assist caused by a load ++operation, data unrelated to the load memory address can be speculatively ++forwarded from the buffers. Because the load operation causes a fault or ++assist and its result will be discarded, the forwarded data will not cause ++incorrect program execution or state changes. But a malicious operation ++may be able to forward this speculative data to a disclosure gadget which ++allows in turn to infer the value via a cache side channel attack. ++ ++Because the buffers are potentially shared between Hyper-Threads cross ++Hyper-Thread attacks are possible. ++ ++Deeper technical information is available in the MDS specific x86 ++architecture section: :ref:`Documentation/x86/mds.rst `. ++ ++ ++Attack scenarios ++---------------- ++ ++Attacks against the MDS vulnerabilities can be mounted from malicious non ++priviledged user space applications running on hosts or guest. Malicious ++guest OSes can obviously mount attacks as well. ++ ++Contrary to other speculation based vulnerabilities the MDS vulnerability ++does not allow the attacker to control the memory target address. As a ++consequence the attacks are purely sampling based, but as demonstrated with ++the TLBleed attack samples can be postprocessed successfully. ++ ++Web-Browsers ++^^^^^^^^^^^^ ++ ++ It's unclear whether attacks through Web-Browsers are possible at ++ all. The exploitation through Java-Script is considered very unlikely, ++ but other widely used web technologies like Webassembly could possibly be ++ abused. ++ ++ ++.. _mds_sys_info: ++ ++MDS system information ++----------------------- ++ ++The Linux kernel provides a sysfs interface to enumerate the current MDS ++status of the system: whether the system is vulnerable, and which ++mitigations are active. The relevant sysfs file is: ++ ++/sys/devices/system/cpu/vulnerabilities/mds ++ ++The possible values in this file are: ++ ++ ========================================= ================================= ++ 'Not affected' The processor is not vulnerable ++ ++ 'Vulnerable' The processor is vulnerable, ++ but no mitigation enabled ++ ++ 'Vulnerable: Clear CPU buffers attempted' The processor is vulnerable but ++ microcode is not updated. ++ The mitigation is enabled on a ++ best effort basis. ++ See :ref:`vmwerv` ++ ++ 'Mitigation: CPU buffer clear' The processor is vulnerable and the ++ CPU buffer clearing mitigation is ++ enabled. 
++ ========================================= ================================= ++ ++If the processor is vulnerable then the following information is appended ++to the above information: ++ ++ ======================== ============================================ ++ 'SMT vulnerable' SMT is enabled ++ 'SMT mitigated' SMT is enabled and mitigated ++ 'SMT disabled' SMT is disabled ++ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown ++ ======================== ============================================ ++ ++.. _vmwerv: ++ ++Best effort mitigation mode ++^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ If the processor is vulnerable, but the availability of the microcode based ++ mitigation mechanism is not advertised via CPUID the kernel selects a best ++ effort mitigation mode. This mode invokes the mitigation instructions ++ without a guarantee that they clear the CPU buffers. ++ ++ This is done to address virtualization scenarios where the host has the ++ microcode update applied, but the hypervisor is not yet updated to expose ++ the CPUID to the guest. If the host has updated microcode the protection ++ takes effect otherwise a few cpu cycles are wasted pointlessly. ++ ++ The state in the mds sysfs file reflects this situation accordingly. ++ ++ ++Mitigation mechanism ++------------------------- ++ ++The kernel detects the affected CPUs and the presence of the microcode ++which is required. ++ ++If a CPU is affected and the microcode is available, then the kernel ++enables the mitigation by default. The mitigation can be controlled at boot ++time via a kernel command line option. See ++:ref:`mds_mitigation_control_command_line`. ++ ++.. _cpu_buffer_clear: ++ ++CPU buffer clearing ++^^^^^^^^^^^^^^^^^^^ ++ ++ The mitigation for MDS clears the affected CPU buffers on return to user ++ space and when entering a guest. ++ ++ If SMT is enabled it also clears the buffers on idle entry when the CPU ++ is only affected by MSBDS and not any other MDS variant, because the ++ other variants cannot be protected against cross Hyper-Thread attacks. ++ ++ For CPUs which are only affected by MSBDS the user space, guest and idle ++ transition mitigations are sufficient and SMT is not affected. ++ ++.. _virt_mechanism: ++ ++Virtualization mitigation ++^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ The protection for host to guest transition depends on the L1TF ++ vulnerability of the CPU: ++ ++ - CPU is affected by L1TF: ++ ++ If the L1D flush mitigation is enabled and up to date microcode is ++ available, the L1D flush mitigation is automatically protecting the ++ guest transition. ++ ++ If the L1D flush mitigation is disabled then the MDS mitigation is ++ invoked explicit when the host MDS mitigation is enabled. ++ ++ For details on L1TF and virtualization see: ++ :ref:`Documentation/admin-guide/hw-vuln//l1tf.rst `. ++ ++ - CPU is not affected by L1TF: ++ ++ CPU buffers are flushed before entering the guest when the host MDS ++ mitigation is enabled. ++ ++ The resulting MDS protection matrix for the host to guest transition: ++ ++ ============ ===== ============= ============ ================= ++ L1TF MDS VMX-L1FLUSH Host MDS MDS-State ++ ++ Don't care No Don't care N/A Not affected ++ ++ Yes Yes Disabled Off Vulnerable ++ ++ Yes Yes Disabled Full Mitigated ++ ++ Yes Yes Enabled Don't care Mitigated ++ ++ No Yes N/A Off Vulnerable ++ ++ No Yes N/A Full Mitigated ++ ============ ===== ============= ============ ================= ++ ++ This only covers the host to guest transition, i.e. 
prevents leakage from ++ host to guest, but does not protect the guest internally. Guests need to ++ have their own protections. ++ ++.. _xeon_phi: ++ ++XEON PHI specific considerations ++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ The XEON PHI processor family is affected by MSBDS which can be exploited ++ cross Hyper-Threads when entering idle states. Some XEON PHI variants allow ++ to use MWAIT in user space (Ring 3) which opens an potential attack vector ++ for malicious user space. The exposure can be disabled on the kernel ++ command line with the 'ring3mwait=disable' command line option. ++ ++ XEON PHI is not affected by the other MDS variants and MSBDS is mitigated ++ before the CPU enters a idle state. As XEON PHI is not affected by L1TF ++ either disabling SMT is not required for full protection. ++ ++.. _mds_smt_control: ++ ++SMT control ++^^^^^^^^^^^ ++ ++ All MDS variants except MSBDS can be attacked cross Hyper-Threads. That ++ means on CPUs which are affected by MFBDS or MLPDS it is necessary to ++ disable SMT for full protection. These are most of the affected CPUs; the ++ exception is XEON PHI, see :ref:`xeon_phi`. ++ ++ Disabling SMT can have a significant performance impact, but the impact ++ depends on the type of workloads. ++ ++ See the relevant chapter in the L1TF mitigation documentation for details: ++ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst `. ++ ++ ++.. _mds_mitigation_control_command_line: ++ ++Mitigation control on the kernel command line ++--------------------------------------------- ++ ++The kernel command line allows to control the MDS mitigations at boot ++time with the option "mds=". The valid arguments for this option are: ++ ++ ============ ============================================================= ++ full If the CPU is vulnerable, enable all available mitigations ++ for the MDS vulnerability, CPU buffer clearing on exit to ++ userspace and when entering a VM. Idle transitions are ++ protected as well if SMT is enabled. ++ ++ It does not automatically disable SMT. ++ ++ off Disables MDS mitigations completely. ++ ++ ============ ============================================================= ++ ++Not specifying this option is equivalent to "mds=full". ++ ++ ++Mitigation selection guide ++-------------------------- ++ ++1. Trusted userspace ++^^^^^^^^^^^^^^^^^^^^ ++ ++ If all userspace applications are from a trusted source and do not ++ execute untrusted code which is supplied externally, then the mitigation ++ can be disabled. ++ ++ ++2. Virtualization with trusted guests ++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ The same considerations as above versus trusted user space apply. ++ ++3. Virtualization with untrusted guests ++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ++ ++ The protection depends on the state of the L1TF mitigations. ++ See :ref:`virt_mechanism`. ++ ++ If the MDS mitigation is enabled and SMT is disabled, guest to host and ++ guest to guest attacks are prevented. ++ ++.. _mds_default_mitigations: ++ ++Default mitigations ++------------------- ++ ++ The kernel default mitigations for vulnerable processors are: ++ ++ - Enable CPU buffer clearing ++ ++ The kernel does not by default enforce the disabling of SMT, which leaves ++ SMT systems vulnerable when running untrusted code. The same rationale as ++ for L1TF applies. ++ See :ref:`Documentation/admin-guide/hw-vuln//l1tf.rst `. 
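The command line control described above comes down to a small string comparison; a simplified stand-alone sketch (not the literal mds_cmdline() parser added earlier in this series) could be:

    #include <string.h>

    enum mds_mode { MDS_MODE_OFF, MDS_MODE_FULL };

    /* Illustrative sketch: map the documented "mds=" values to a mode. */
    static enum mds_mode parse_mds_arg(const char *arg)
    {
            if (arg && !strcmp(arg, "off"))
                    return MDS_MODE_OFF;
            /* "full", unrecognised values and no option all mean full. */
            return MDS_MODE_FULL;
    }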
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 9afcb240a673..7325319c2c23 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2378,6 +2378,8 @@ + Not specifying this option is equivalent to + mds=full. + ++ For details see: Documentation/admin-guide/hw-vuln/mds.rst ++ + mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory + Amount of memory to be used when the kernel is not able + to see the whole system memory or for test. +-- +2.21.0 + diff --git a/queue-5.0/0015-x86-speculation-mds-Add-mds-full-nosmt-cmdline-optio.patch b/queue-5.0/0015-x86-speculation-mds-Add-mds-full-nosmt-cmdline-optio.patch new file mode 100644 index 00000000000..89261835dc9 --- /dev/null +++ b/queue-5.0/0015-x86-speculation-mds-Add-mds-full-nosmt-cmdline-optio.patch @@ -0,0 +1,92 @@ +From e6cbd74a9a1b97fba94245adac157852d0b78ff5 Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Tue, 2 Apr 2019 09:59:33 -0500 +Subject: [PATCH 15/27] x86/speculation/mds: Add mds=full,nosmt cmdline option + +commit d71eb0ce109a124b0fa714832823b9452f2762cf upstream + +Add the mds=full,nosmt cmdline option. This is like mds=full, but with +SMT disabled if the CPU is vulnerable. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Acked-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/hw-vuln/mds.rst | 3 +++ + Documentation/admin-guide/kernel-parameters.txt | 6 ++++-- + arch/x86/kernel/cpu/bugs.c | 10 ++++++++++ + 3 files changed, 17 insertions(+), 2 deletions(-) + +diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst +index 1de29d28903d..244ab47d1fb3 100644 +--- a/Documentation/admin-guide/hw-vuln/mds.rst ++++ b/Documentation/admin-guide/hw-vuln/mds.rst +@@ -260,6 +260,9 @@ time with the option "mds=". The valid arguments for this option are: + + It does not automatically disable SMT. + ++ full,nosmt The same as mds=full, with SMT disabled on vulnerable ++ CPUs. This is the complete mitigation. ++ + off Disables MDS mitigations completely. + + ============ ============================================================= +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 7325319c2c23..8f04985d3122 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2372,8 +2372,10 @@ + This parameter controls the MDS mitigation. The + options are: + +- full - Enable MDS mitigation on vulnerable CPUs +- off - Unconditionally disable MDS mitigation ++ full - Enable MDS mitigation on vulnerable CPUs ++ full,nosmt - Enable MDS mitigation and disable ++ SMT on vulnerable CPUs ++ off - Unconditionally disable MDS mitigation + + Not specifying this option is equivalent to + mds=full. 
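The bugs.c hunk that follows wires this up; as a rough stand-alone illustration of the added decision (hypothetical helper, not the kernel API):

    /*
     * Illustrative sketch: with mds=full,nosmt, SMT is disabled unless the
     * CPU is affected only by MSBDS, where the user space, guest and idle
     * transition mitigations are already sufficient.
     */
    static int should_disable_smt(int nosmt_requested, int msbds_only)
    {
            return nosmt_requested && !msbds_only;
    }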
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index bcc110a85830..4939addbd758 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -221,6 +221,7 @@ static void x86_amd_ssb_disable(void) + + /* Default mitigation for L1TF-affected CPUs */ + static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; ++static bool mds_nosmt __ro_after_init = false; + + static const char * const mds_strings[] = { + [MDS_MITIGATION_OFF] = "Vulnerable", +@@ -238,8 +239,13 @@ static void __init mds_select_mitigation(void) + if (mds_mitigation == MDS_MITIGATION_FULL) { + if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) + mds_mitigation = MDS_MITIGATION_VMWERV; ++ + static_branch_enable(&mds_user_clear); ++ ++ if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ cpu_smt_disable(false); + } ++ + pr_info("%s\n", mds_strings[mds_mitigation]); + } + +@@ -255,6 +261,10 @@ static int __init mds_cmdline(char *str) + mds_mitigation = MDS_MITIGATION_OFF; + else if (!strcmp(str, "full")) + mds_mitigation = MDS_MITIGATION_FULL; ++ else if (!strcmp(str, "full,nosmt")) { ++ mds_mitigation = MDS_MITIGATION_FULL; ++ mds_nosmt = true; ++ } + + return 0; + } +-- +2.21.0 + diff --git a/queue-5.0/0016-x86-speculation-Move-arch_smt_update-call-to-after-m.patch b/queue-5.0/0016-x86-speculation-Move-arch_smt_update-call-to-after-m.patch new file mode 100644 index 00000000000..a171fe5afce --- /dev/null +++ b/queue-5.0/0016-x86-speculation-Move-arch_smt_update-call-to-after-m.patch @@ -0,0 +1,47 @@ +From 66c060cbba0b112198575f2691fbdc2259d04eeb Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Tue, 2 Apr 2019 10:00:14 -0500 +Subject: [PATCH 16/27] x86/speculation: Move arch_smt_update() call to after + mitigation decisions + +commit 7c3658b20194a5b3209a143f63bc9c643c6a3ae2 upstream + +arch_smt_update() now has a dependency on both Spectre v2 and MDS +mitigations. Move its initial call to after all the mitigation decisions +have been made. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Acked-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 5 ++--- + 1 file changed, 2 insertions(+), 3 deletions(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 4939addbd758..0c2ff23906dc 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -111,6 +111,8 @@ void __init check_bugs(void) + + mds_select_mitigation(); + ++ arch_smt_update(); ++ + #ifdef CONFIG_X86_32 + /* + * Check whether we are able to run this kernel safely on SMP. +@@ -638,9 +640,6 @@ static void __init spectre_v2_select_mitigation(void) + + /* Set up IBPB and STIBP depending on the general spectre V2 command */ + spectre_v2_user_select_mitigation(cmd); +- +- /* Enable STIBP if appropriate */ +- arch_smt_update(); + } + + static void update_stibp_msr(void * __unused) +-- +2.21.0 + diff --git a/queue-5.0/0017-x86-speculation-mds-Add-SMT-warning-message.patch b/queue-5.0/0017-x86-speculation-mds-Add-SMT-warning-message.patch new file mode 100644 index 00000000000..3661cbdc28f --- /dev/null +++ b/queue-5.0/0017-x86-speculation-mds-Add-SMT-warning-message.patch @@ -0,0 +1,62 @@ +From 8fb055ed992da5fe882a82d97e05e23b859c1c6a Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Tue, 2 Apr 2019 10:00:51 -0500 +Subject: [PATCH 17/27] x86/speculation/mds: Add SMT warning message + +commit 39226ef02bfb43248b7db12a4fdccb39d95318e3 upstream + +MDS is vulnerable with SMT. 
Make that clear with a one-time printk +whenever SMT first gets enabled. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Acked-by: Jiri Kosina +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 0c2ff23906dc..2ee603a4863b 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -673,6 +673,9 @@ static void update_indir_branch_cond(void) + static_branch_disable(&switch_to_cond_stibp); + } + ++#undef pr_fmt ++#define pr_fmt(fmt) fmt ++ + /* Update the static key controlling the MDS CPU buffer clear in idle */ + static void update_mds_branch_idle(void) + { +@@ -693,6 +696,8 @@ static void update_mds_branch_idle(void) + static_branch_disable(&mds_idle_clear); + } + ++#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" ++ + void arch_smt_update(void) + { + /* Enhanced IBRS implies STIBP. No update required. */ +@@ -717,6 +722,8 @@ void arch_smt_update(void) + switch (mds_mitigation) { + case MDS_MITIGATION_FULL: + case MDS_MITIGATION_VMWERV: ++ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ pr_warn_once(MDS_MSG_SMT); + update_mds_branch_idle(); + break; + case MDS_MITIGATION_OFF: +@@ -1149,6 +1156,7 @@ static int __init l1tf_cmdline(char *str) + early_param("l1tf", l1tf_cmdline); + + #undef pr_fmt ++#define pr_fmt(fmt) fmt + + #ifdef CONFIG_SYSFS + +-- +2.21.0 + diff --git a/queue-5.0/0018-x86-speculation-mds-Fix-comment.patch b/queue-5.0/0018-x86-speculation-mds-Fix-comment.patch new file mode 100644 index 00000000000..9652487d79b --- /dev/null +++ b/queue-5.0/0018-x86-speculation-mds-Fix-comment.patch @@ -0,0 +1,35 @@ +From ad63c7fca93463b7a8876f4a82bda79b415f2b16 Mon Sep 17 00:00:00 2001 +From: Boris Ostrovsky +Date: Fri, 12 Apr 2019 17:50:57 -0400 +Subject: [PATCH 18/27] x86/speculation/mds: Fix comment + +commit cae5ec342645746d617dd420d206e1588d47768a upstream + +s/L1TF/MDS/ + +Signed-off-by: Boris Ostrovsky +Signed-off-by: Konrad Rzeszutek Wilk +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Reviewed-by: Josh Poimboeuf +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 2ee603a4863b..ada7cdd33f69 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -221,7 +221,7 @@ static void x86_amd_ssb_disable(void) + #undef pr_fmt + #define pr_fmt(fmt) "MDS: " fmt + +-/* Default mitigation for L1TF-affected CPUs */ ++/* Default mitigation for MDS-affected CPUs */ + static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL; + static bool mds_nosmt __ro_after_init = false; + +-- +2.21.0 + diff --git a/queue-5.0/0019-x86-speculation-mds-Print-SMT-vulnerable-on-MSBDS-wi.patch b/queue-5.0/0019-x86-speculation-mds-Print-SMT-vulnerable-on-MSBDS-wi.patch new file mode 100644 index 00000000000..97d74ec45ff --- /dev/null +++ b/queue-5.0/0019-x86-speculation-mds-Print-SMT-vulnerable-on-MSBDS-wi.patch @@ -0,0 +1,50 @@ +From e2dfbd8285ea479fccb948b74849919b6275910d Mon Sep 17 00:00:00 2001 +From: Konrad Rzeszutek Wilk +Date: Fri, 12 Apr 2019 17:50:58 -0400 +Subject: [PATCH 19/27] x86/speculation/mds: Print SMT vulnerable on MSBDS with + mitigations off + +commit 
e2c3c94788b08891dcf3dbe608f9880523ecd71b upstream + +This code is only for CPUs which are affected by MSBDS, but are *not* +affected by the other two MDS issues. + +For such CPUs, enabling the mds_idle_clear mitigation is enough to +mitigate SMT. + +However if user boots with 'mds=off' and still has SMT enabled, we should +not report that SMT is mitigated: + +$cat /sys//devices/system/cpu/vulnerabilities/mds +Vulnerable; SMT mitigated + +But rather: +Vulnerable; SMT vulnerable + +Signed-off-by: Konrad Rzeszutek Wilk +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Reviewed-by: Josh Poimboeuf +Link: https://lkml.kernel.org/r/20190412215118.294906495@localhost.localdomain +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/bugs.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index ada7cdd33f69..04c140ac36af 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -1204,7 +1204,8 @@ static ssize_t mds_show_state(char *buf) + + if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], +- sched_smt_active() ? "mitigated" : "disabled"); ++ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : ++ sched_smt_active() ? "mitigated" : "disabled")); + } + + return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], +-- +2.21.0 + diff --git a/queue-5.0/0020-cpu-speculation-Add-mitigations-cmdline-option.patch b/queue-5.0/0020-cpu-speculation-Add-mitigations-cmdline-option.patch new file mode 100644 index 00000000000..210b115b14f --- /dev/null +++ b/queue-5.0/0020-cpu-speculation-Add-mitigations-cmdline-option.patch @@ -0,0 +1,168 @@ +From 8d7c1c38b02551bd808007884bc37f501076b8c8 Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Fri, 12 Apr 2019 15:39:28 -0500 +Subject: [PATCH 20/27] cpu/speculation: Add 'mitigations=' cmdline option + +commit 98af8452945c55652de68536afdde3b520fec429 upstream + +Keeping track of the number of mitigations for all the CPU speculation +bugs has become overwhelming for many users. It's getting more and more +complicated to decide which mitigations are needed for a given +architecture. Complicating matters is the fact that each arch tends to +have its own custom way to mitigate the same vulnerability. + +Most users fall into a few basic categories: + +a) they want all mitigations off; + +b) they want all reasonable mitigations on, with SMT enabled even if + it's vulnerable; or + +c) they want all reasonable mitigations on, with SMT disabled if + vulnerable. + +Define a set of curated, arch-independent options, each of which is an +aggregation of existing options: + +- mitigations=off: Disable all mitigations. + +- mitigations=auto: [default] Enable all the default mitigations, but + leave SMT enabled, even if it's vulnerable. + +- mitigations=auto,nosmt: Enable all the default mitigations, disabling + SMT if needed by a mitigation. + +Currently, these options are placeholders which don't actually do +anything. They will be fleshed out in upcoming patches. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Tested-by: Jiri Kosina (on x86) +Reviewed-by: Jiri Kosina +Cc: Borislav Petkov +Cc: "H . 
Peter Anvin" +Cc: Andy Lutomirski +Cc: Peter Zijlstra +Cc: Jiri Kosina +Cc: Waiman Long +Cc: Andrea Arcangeli +Cc: Jon Masters +Cc: Benjamin Herrenschmidt +Cc: Paul Mackerras +Cc: Michael Ellerman +Cc: linuxppc-dev@lists.ozlabs.org +Cc: Martin Schwidefsky +Cc: Heiko Carstens +Cc: linux-s390@vger.kernel.org +Cc: Catalin Marinas +Cc: Will Deacon +Cc: linux-arm-kernel@lists.infradead.org +Cc: linux-arch@vger.kernel.org +Cc: Greg Kroah-Hartman +Cc: Tyler Hicks +Cc: Linus Torvalds +Cc: Randy Dunlap +Cc: Steven Price +Cc: Phil Auld +Link: https://lkml.kernel.org/r/b07a8ef9b7c5055c3a4637c87d07c296d5016fe0.1555085500.git.jpoimboe@redhat.com +Signed-off-by: Greg Kroah-Hartman +--- + .../admin-guide/kernel-parameters.txt | 24 +++++++++++++++++++ + include/linux/cpu.h | 24 +++++++++++++++++++ + kernel/cpu.c | 15 ++++++++++++ + 3 files changed, 63 insertions(+) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 8f04985d3122..df6d9a7c1724 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2539,6 +2539,30 @@ + in the "bleeding edge" mini2440 support kernel at + http://repo.or.cz/w/linux-2.6/mini2440.git + ++ mitigations= ++ Control optional mitigations for CPU vulnerabilities. ++ This is a set of curated, arch-independent options, each ++ of which is an aggregation of existing arch-specific ++ options. ++ ++ off ++ Disable all optional CPU mitigations. This ++ improves system performance, but it may also ++ expose users to several CPU vulnerabilities. ++ ++ auto (default) ++ Mitigate all CPU vulnerabilities, but leave SMT ++ enabled, even if it's vulnerable. This is for ++ users who don't want to be surprised by SMT ++ getting disabled across kernel upgrades, or who ++ have other ways of avoiding SMT-based attacks. ++ This is the default behavior. ++ ++ auto,nosmt ++ Mitigate all CPU vulnerabilities, disabling SMT ++ if needed. This is for users who always want to ++ be fully mitigated, even if it means losing SMT. ++ + mminit_loglevel= + [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this + parameter allows control of the logging verbosity for +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 3c87ad888ed3..57ae83c4d5f4 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -189,4 +189,28 @@ static inline void cpu_smt_disable(bool force) { } + static inline void cpu_smt_check_topology(void) { } + #endif + ++/* ++ * These are used for a global "mitigations=" cmdline option for toggling ++ * optional CPU mitigations. 
++ */ ++enum cpu_mitigations { ++ CPU_MITIGATIONS_OFF, ++ CPU_MITIGATIONS_AUTO, ++ CPU_MITIGATIONS_AUTO_NOSMT, ++}; ++ ++extern enum cpu_mitigations cpu_mitigations; ++ ++/* mitigations=off */ ++static inline bool cpu_mitigations_off(void) ++{ ++ return cpu_mitigations == CPU_MITIGATIONS_OFF; ++} ++ ++/* mitigations=auto,nosmt */ ++static inline bool cpu_mitigations_auto_nosmt(void) ++{ ++ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT; ++} ++ + #endif /* _LINUX_CPU_H_ */ +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 6754f3ecfd94..43e741e88691 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -2304,3 +2304,18 @@ void __init boot_cpu_hotplug_init(void) + #endif + this_cpu_write(cpuhp_state.state, CPUHP_ONLINE); + } ++ ++enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO; ++ ++static int __init mitigations_parse_cmdline(char *arg) ++{ ++ if (!strcmp(arg, "off")) ++ cpu_mitigations = CPU_MITIGATIONS_OFF; ++ else if (!strcmp(arg, "auto")) ++ cpu_mitigations = CPU_MITIGATIONS_AUTO; ++ else if (!strcmp(arg, "auto,nosmt")) ++ cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT; ++ ++ return 0; ++} ++early_param("mitigations", mitigations_parse_cmdline); +-- +2.21.0 + diff --git a/queue-5.0/0021-x86-speculation-Support-mitigations-cmdline-option.patch b/queue-5.0/0021-x86-speculation-Support-mitigations-cmdline-option.patch new file mode 100644 index 00000000000..95ecf712f12 --- /dev/null +++ b/queue-5.0/0021-x86-speculation-Support-mitigations-cmdline-option.patch @@ -0,0 +1,155 @@ +From 1aaa69ec96c73918d45177d9d35680f648d0faed Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Fri, 12 Apr 2019 15:39:29 -0500 +Subject: [PATCH 21/27] x86/speculation: Support 'mitigations=' cmdline option + +commit d68be4c4d31295ff6ae34a8ddfaa4c1a8ff42812 upstream + +Configure x86 runtime CPU speculation bug mitigations in accordance with +the 'mitigations=' cmdline option. This affects Meltdown, Spectre v2, +Speculative Store Bypass, and L1TF. + +The default behavior is unchanged. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Tested-by: Jiri Kosina (on x86) +Reviewed-by: Jiri Kosina +Cc: Borislav Petkov +Cc: "H . Peter Anvin" +Cc: Andy Lutomirski +Cc: Peter Zijlstra +Cc: Jiri Kosina +Cc: Waiman Long +Cc: Andrea Arcangeli +Cc: Jon Masters +Cc: Benjamin Herrenschmidt +Cc: Paul Mackerras +Cc: Michael Ellerman +Cc: linuxppc-dev@lists.ozlabs.org +Cc: Martin Schwidefsky +Cc: Heiko Carstens +Cc: linux-s390@vger.kernel.org +Cc: Catalin Marinas +Cc: Will Deacon +Cc: linux-arm-kernel@lists.infradead.org +Cc: linux-arch@vger.kernel.org +Cc: Greg Kroah-Hartman +Cc: Tyler Hicks +Cc: Linus Torvalds +Cc: Randy Dunlap +Cc: Steven Price +Cc: Phil Auld +Link: https://lkml.kernel.org/r/6616d0ae169308516cfdf5216bedd169f8a8291b.1555085500.git.jpoimboe@redhat.com +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 16 +++++++++++----- + arch/x86/kernel/cpu/bugs.c | 11 +++++++++-- + arch/x86/mm/pti.c | 4 +++- + 3 files changed, 23 insertions(+), 8 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index df6d9a7c1724..59a1181e52b8 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2540,15 +2540,20 @@ + http://repo.or.cz/w/linux-2.6/mini2440.git + + mitigations= +- Control optional mitigations for CPU vulnerabilities. 
+- This is a set of curated, arch-independent options, each +- of which is an aggregation of existing arch-specific +- options. ++ [X86] Control optional mitigations for CPU ++ vulnerabilities. This is a set of curated, ++ arch-independent options, each of which is an ++ aggregation of existing arch-specific options. + + off + Disable all optional CPU mitigations. This + improves system performance, but it may also + expose users to several CPU vulnerabilities. ++ Equivalent to: nopti [X86] ++ nospectre_v2 [X86] ++ spectre_v2_user=off [X86] ++ spec_store_bypass_disable=off [X86] ++ l1tf=off [X86] + + auto (default) + Mitigate all CPU vulnerabilities, but leave SMT +@@ -2556,12 +2561,13 @@ + users who don't want to be surprised by SMT + getting disabled across kernel upgrades, or who + have other ways of avoiding SMT-based attacks. +- This is the default behavior. ++ Equivalent to: (default behavior) + + auto,nosmt + Mitigate all CPU vulnerabilities, disabling SMT + if needed. This is for users who always want to + be fully mitigated, even if it means losing SMT. ++ Equivalent to: l1tf=flush,nosmt [X86] + + mminit_loglevel= + [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 04c140ac36af..7c79672234e4 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -506,7 +506,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) + char arg[20]; + int ret, i; + +- if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) ++ if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || ++ cpu_mitigations_off()) + return SPECTRE_V2_CMD_NONE; + + ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); +@@ -771,7 +772,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) + char arg[20]; + int ret, i; + +- if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) { ++ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || ++ cpu_mitigations_off()) { + return SPEC_STORE_BYPASS_CMD_NONE; + } else { + ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", +@@ -1095,6 +1097,11 @@ static void __init l1tf_select_mitigation(void) + if (!boot_cpu_has_bug(X86_BUG_L1TF)) + return; + ++ if (cpu_mitigations_off()) ++ l1tf_mitigation = L1TF_MITIGATION_OFF; ++ else if (cpu_mitigations_auto_nosmt()) ++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; ++ + override_cache_bits(&boot_cpu_data); + + switch (l1tf_mitigation) { +diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c +index 4fee5c3003ed..5890f09bfc19 100644 +--- a/arch/x86/mm/pti.c ++++ b/arch/x86/mm/pti.c +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -115,7 +116,8 @@ void __init pti_check_boottime_disable(void) + } + } + +- if (cmdline_find_option_bool(boot_command_line, "nopti")) { ++ if (cmdline_find_option_bool(boot_command_line, "nopti") || ++ cpu_mitigations_off()) { + pti_mode = PTI_FORCE_OFF; + pti_print_if_insecure("disabled on command line."); + return; +-- +2.21.0 + diff --git a/queue-5.0/0022-powerpc-speculation-Support-mitigations-cmdline-opti.patch b/queue-5.0/0022-powerpc-speculation-Support-mitigations-cmdline-opti.patch new file mode 100644 index 00000000000..2d72d6f81da --- /dev/null +++ b/queue-5.0/0022-powerpc-speculation-Support-mitigations-cmdline-opti.patch @@ -0,0 +1,126 @@ +From fd08ca316946b321be1a86b8fcc4ed8decd0c6a2 Mon Sep 17 00:00:00 2001 +From: Josh 
Poimboeuf +Date: Fri, 12 Apr 2019 15:39:30 -0500 +Subject: [PATCH 22/27] powerpc/speculation: Support 'mitigations=' cmdline + option + +commit 782e69efb3dfed6e8360bc612e8c7827a901a8f9 upstream + +Configure powerpc CPU runtime speculation bug mitigations in accordance +with the 'mitigations=' cmdline option. This affects Meltdown, Spectre +v1, Spectre v2, and Speculative Store Bypass. + +The default behavior is unchanged. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Tested-by: Jiri Kosina (on x86) +Reviewed-by: Jiri Kosina +Cc: Borislav Petkov +Cc: "H . Peter Anvin" +Cc: Andy Lutomirski +Cc: Peter Zijlstra +Cc: Jiri Kosina +Cc: Waiman Long +Cc: Andrea Arcangeli +Cc: Jon Masters +Cc: Benjamin Herrenschmidt +Cc: Paul Mackerras +Cc: Michael Ellerman +Cc: linuxppc-dev@lists.ozlabs.org +Cc: Martin Schwidefsky +Cc: Heiko Carstens +Cc: linux-s390@vger.kernel.org +Cc: Catalin Marinas +Cc: Will Deacon +Cc: linux-arm-kernel@lists.infradead.org +Cc: linux-arch@vger.kernel.org +Cc: Greg Kroah-Hartman +Cc: Tyler Hicks +Cc: Linus Torvalds +Cc: Randy Dunlap +Cc: Steven Price +Cc: Phil Auld +Link: https://lkml.kernel.org/r/245a606e1a42a558a310220312d9b6adb9159df6.1555085500.git.jpoimboe@redhat.com +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 9 +++++---- + arch/powerpc/kernel/security.c | 6 +++--- + arch/powerpc/kernel/setup_64.c | 2 +- + 3 files changed, 9 insertions(+), 8 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 59a1181e52b8..ed9ec2ea362d 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2540,7 +2540,7 @@ + http://repo.or.cz/w/linux-2.6/mini2440.git + + mitigations= +- [X86] Control optional mitigations for CPU ++ [X86,PPC] Control optional mitigations for CPU + vulnerabilities. This is a set of curated, + arch-independent options, each of which is an + aggregation of existing arch-specific options. +@@ -2549,10 +2549,11 @@ + Disable all optional CPU mitigations. This + improves system performance, but it may also + expose users to several CPU vulnerabilities. 
+- Equivalent to: nopti [X86] +- nospectre_v2 [X86] ++ Equivalent to: nopti [X86,PPC] ++ nospectre_v1 [PPC] ++ nospectre_v2 [X86,PPC] + spectre_v2_user=off [X86] +- spec_store_bypass_disable=off [X86] ++ spec_store_bypass_disable=off [X86,PPC] + l1tf=off [X86] + + auto (default) +diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c +index b33bafb8fcea..70568ccbd9fd 100644 +--- a/arch/powerpc/kernel/security.c ++++ b/arch/powerpc/kernel/security.c +@@ -57,7 +57,7 @@ void setup_barrier_nospec(void) + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && + security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); + +- if (!no_nospec) ++ if (!no_nospec && !cpu_mitigations_off()) + enable_barrier_nospec(enable); + } + +@@ -116,7 +116,7 @@ static int __init handle_nospectre_v2(char *p) + early_param("nospectre_v2", handle_nospectre_v2); + void setup_spectre_v2(void) + { +- if (no_spectrev2) ++ if (no_spectrev2 || cpu_mitigations_off()) + do_btb_flush_fixups(); + else + btb_flush_enabled = true; +@@ -300,7 +300,7 @@ void setup_stf_barrier(void) + + stf_enabled_flush_types = type; + +- if (!no_stf_barrier) ++ if (!no_stf_barrier && !cpu_mitigations_off()) + stf_barrier_enable(enable); + } + +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c +index 236c1151a3a7..c7ec27ba8926 100644 +--- a/arch/powerpc/kernel/setup_64.c ++++ b/arch/powerpc/kernel/setup_64.c +@@ -958,7 +958,7 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable) + + enabled_flush_types = types; + +- if (!no_rfi_flush) ++ if (!no_rfi_flush && !cpu_mitigations_off()) + rfi_flush_enable(enable); + } + +-- +2.21.0 + diff --git a/queue-5.0/0023-s390-speculation-Support-mitigations-cmdline-option.patch b/queue-5.0/0023-s390-speculation-Support-mitigations-cmdline-option.patch new file mode 100644 index 00000000000..d37bed95e61 --- /dev/null +++ b/queue-5.0/0023-s390-speculation-Support-mitigations-cmdline-option.patch @@ -0,0 +1,96 @@ +From aa18d76c6cd803424e787d15d6af34d7339c8419 Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Fri, 12 Apr 2019 15:39:31 -0500 +Subject: [PATCH 23/27] s390/speculation: Support 'mitigations=' cmdline option + +commit 0336e04a6520bdaefdb0769d2a70084fa52e81ed upstream + +Configure s390 runtime CPU speculation bug mitigations in accordance +with the 'mitigations=' cmdline option. This affects Spectre v1 and +Spectre v2. + +The default behavior is unchanged. + +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Tested-by: Jiri Kosina (on x86) +Reviewed-by: Jiri Kosina +Cc: Borislav Petkov +Cc: "H . 
Peter Anvin" +Cc: Andy Lutomirski +Cc: Peter Zijlstra +Cc: Jiri Kosina +Cc: Waiman Long +Cc: Andrea Arcangeli +Cc: Jon Masters +Cc: Benjamin Herrenschmidt +Cc: Paul Mackerras +Cc: Michael Ellerman +Cc: linuxppc-dev@lists.ozlabs.org +Cc: Martin Schwidefsky +Cc: Heiko Carstens +Cc: linux-s390@vger.kernel.org +Cc: Catalin Marinas +Cc: Will Deacon +Cc: linux-arm-kernel@lists.infradead.org +Cc: linux-arch@vger.kernel.org +Cc: Greg Kroah-Hartman +Cc: Tyler Hicks +Cc: Linus Torvalds +Cc: Randy Dunlap +Cc: Steven Price +Cc: Phil Auld +Link: https://lkml.kernel.org/r/e4a161805458a5ec88812aac0307ae3908a030fc.1555085500.git.jpoimboe@redhat.com +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 5 +++-- + arch/s390/kernel/nospec-branch.c | 3 ++- + 2 files changed, 5 insertions(+), 3 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index ed9ec2ea362d..9aa3543a8723 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2540,7 +2540,7 @@ + http://repo.or.cz/w/linux-2.6/mini2440.git + + mitigations= +- [X86,PPC] Control optional mitigations for CPU ++ [X86,PPC,S390] Control optional mitigations for CPU + vulnerabilities. This is a set of curated, + arch-independent options, each of which is an + aggregation of existing arch-specific options. +@@ -2551,7 +2551,8 @@ + expose users to several CPU vulnerabilities. + Equivalent to: nopti [X86,PPC] + nospectre_v1 [PPC] +- nospectre_v2 [X86,PPC] ++ nobp=0 [S390] ++ nospectre_v2 [X86,PPC,S390] + spectre_v2_user=off [X86] + spec_store_bypass_disable=off [X86,PPC] + l1tf=off [X86] +diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c +index bdddaae96559..649135cbedd5 100644 +--- a/arch/s390/kernel/nospec-branch.c ++++ b/arch/s390/kernel/nospec-branch.c +@@ -1,6 +1,7 @@ + // SPDX-License-Identifier: GPL-2.0 + #include + #include ++#include + #include + + static int __init nobp_setup_early(char *str) +@@ -58,7 +59,7 @@ early_param("nospectre_v2", nospectre_v2_setup_early); + + void __init nospec_auto_detect(void) + { +- if (test_facility(156)) { ++ if (test_facility(156) || cpu_mitigations_off()) { + /* + * The machine supports etokens. + * Disable expolines and disable nobp. +-- +2.21.0 + diff --git a/queue-5.0/0024-x86-speculation-mds-Add-mitigations-support-for-MDS.patch b/queue-5.0/0024-x86-speculation-mds-Add-mitigations-support-for-MDS.patch new file mode 100644 index 00000000000..5a2a757d547 --- /dev/null +++ b/queue-5.0/0024-x86-speculation-mds-Add-mitigations-support-for-MDS.patch @@ -0,0 +1,63 @@ +From 560cb4a822753dc4c3ca7fce5666f3b54ae71357 Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Wed, 17 Apr 2019 16:39:02 -0500 +Subject: [PATCH 24/27] x86/speculation/mds: Add 'mitigations=' support for MDS + +commit 5c14068f87d04adc73ba3f41c2a303d3c3d1fa12 upstream + +Add MDS to the new 'mitigations=' cmdline option. 
+ +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/kernel-parameters.txt | 2 ++ + arch/x86/kernel/cpu/bugs.c | 5 +++-- + 2 files changed, 5 insertions(+), 2 deletions(-) + +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index 9aa3543a8723..18cad2b0392a 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -2556,6 +2556,7 @@ + spectre_v2_user=off [X86] + spec_store_bypass_disable=off [X86,PPC] + l1tf=off [X86] ++ mds=off [X86] + + auto (default) + Mitigate all CPU vulnerabilities, but leave SMT +@@ -2570,6 +2571,7 @@ + if needed. This is for users who always want to + be fully mitigated, even if it means losing SMT. + Equivalent to: l1tf=flush,nosmt [X86] ++ mds=full,nosmt [X86] + + mminit_loglevel= + [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c +index 7c79672234e4..1b2ce0c6c4da 100644 +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -233,7 +233,7 @@ static const char * const mds_strings[] = { + + static void __init mds_select_mitigation(void) + { +- if (!boot_cpu_has_bug(X86_BUG_MDS)) { ++ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) { + mds_mitigation = MDS_MITIGATION_OFF; + return; + } +@@ -244,7 +244,8 @@ static void __init mds_select_mitigation(void) + + static_branch_enable(&mds_user_clear); + +- if (mds_nosmt && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) ++ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) && ++ (mds_nosmt || cpu_mitigations_auto_nosmt())) + cpu_smt_disable(false); + } + +-- +2.21.0 + diff --git a/queue-5.0/0025-x86-mds-Add-MDSUM-variant-to-the-MDS-documentation.patch b/queue-5.0/0025-x86-mds-Add-MDSUM-variant-to-the-MDS-documentation.patch new file mode 100644 index 00000000000..37546fee883 --- /dev/null +++ b/queue-5.0/0025-x86-mds-Add-MDSUM-variant-to-the-MDS-documentation.patch @@ -0,0 +1,73 @@ +From d6610164b523f736fc1bb1576767c048c562a09b Mon Sep 17 00:00:00 2001 +From: speck for Pawan Gupta +Date: Mon, 6 May 2019 12:23:50 -0700 +Subject: [PATCH 25/27] x86/mds: Add MDSUM variant to the MDS documentation + +commit e672f8bf71c66253197e503f75c771dd28ada4a0 upstream + +Updated the documentation for a new CVE-2019-11091 Microarchitectural Data +Sampling Uncacheable Memory (MDSUM) which is a variant of +Microarchitectural Data Sampling (MDS). MDS is a family of side channel +attacks on internal buffers in Intel CPUs. + +MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from +memory that takes a fault or assist can leave data in a microarchitectural +structure that may later be observed using one of the same methods used by +MSBDS, MFBDS or MLPDS. There are no new code changes expected for MDSUM. +The existing mitigation for MDS applies to MDSUM as well. 
+ +Signed-off-by: Pawan Gupta +Signed-off-by: Thomas Gleixner +Reviewed-by: Tyler Hicks +Reviewed-by: Jon Masters +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/hw-vuln/mds.rst | 5 +++-- + Documentation/x86/mds.rst | 5 +++++ + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst +index 244ab47d1fb3..e0dccf414eca 100644 +--- a/Documentation/admin-guide/hw-vuln/mds.rst ++++ b/Documentation/admin-guide/hw-vuln/mds.rst +@@ -32,11 +32,12 @@ Related CVEs + + The following CVE entries are related to the MDS vulnerability: + +- ============== ===== ============================================== ++ ============== ===== =================================================== + CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling + CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling + CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling +- ============== ===== ============================================== ++ CVE-2019-11091 MDSUM Microarchitectural Data Sampling Uncacheable Memory ++ ============== ===== =================================================== + + Problem + ------- +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 3d6f943f1afb..979945be257a 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -12,6 +12,7 @@ on internal buffers in Intel CPUs. The variants are: + - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126) + - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130) + - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127) ++ - Microarchitectural Data Sampling Uncacheable Memory (MDSUM) (CVE-2019-11091) + + MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a + dependent load (store-to-load forwarding) as an optimization. The forward +@@ -38,6 +39,10 @@ faulting or assisting loads under certain conditions, which again can be + exploited eventually. Load ports are shared between Hyper-Threads so cross + thread leakage is possible. + ++MDSUM is a special case of MSBDS, MFBDS and MLPDS. An uncacheable load from ++memory that takes a fault or assist can leave data in a microarchitectural ++structure that may later be observed using one of the same methods used by ++MSBDS, MFBDS or MLPDS. + + Exposure assumptions + -------------------- +-- +2.21.0 + diff --git a/queue-5.0/0026-Documentation-Correct-the-possible-MDS-sysfs-values.patch b/queue-5.0/0026-Documentation-Correct-the-possible-MDS-sysfs-values.patch new file mode 100644 index 00000000000..653102a1feb --- /dev/null +++ b/queue-5.0/0026-Documentation-Correct-the-possible-MDS-sysfs-values.patch @@ -0,0 +1,66 @@ +From 4a9ca3864343514ff4d6abc832b71566923c0b62 Mon Sep 17 00:00:00 2001 +From: Tyler Hicks +Date: Mon, 6 May 2019 23:52:58 +0000 +Subject: [PATCH 26/27] Documentation: Correct the possible MDS sysfs values + +commit ea01668f9f43021b28b3f4d5ffad50106a1e1301 upstream + +Adjust the last two rows in the table that display possible values when +MDS mitigation is enabled. They both were slightly innacurate. + +In addition, convert the table of possible values and their descriptions +to a list-table. The simple table format uses the top border of equals +signs to determine cell width which resulted in the first column being +far too wide in comparison to the second column that contained the +majority of the text. 
+ +Signed-off-by: Tyler Hicks +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/hw-vuln/mds.rst | 29 ++++++++++------------- + 1 file changed, 13 insertions(+), 16 deletions(-) + +diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst +index e0dccf414eca..e3a796c0d3a2 100644 +--- a/Documentation/admin-guide/hw-vuln/mds.rst ++++ b/Documentation/admin-guide/hw-vuln/mds.rst +@@ -95,22 +95,19 @@ mitigations are active. The relevant sysfs file is: + + The possible values in this file are: + +- ========================================= ================================= +- 'Not affected' The processor is not vulnerable +- +- 'Vulnerable' The processor is vulnerable, +- but no mitigation enabled +- +- 'Vulnerable: Clear CPU buffers attempted' The processor is vulnerable but +- microcode is not updated. +- The mitigation is enabled on a +- best effort basis. +- See :ref:`vmwerv` +- +- 'Mitigation: CPU buffer clear' The processor is vulnerable and the +- CPU buffer clearing mitigation is +- enabled. +- ========================================= ================================= ++ .. list-table:: ++ ++ * - 'Not affected' ++ - The processor is not vulnerable ++ * - 'Vulnerable' ++ - The processor is vulnerable, but no mitigation enabled ++ * - 'Vulnerable: Clear CPU buffers attempted, no microcode' ++ - The processor is vulnerable but microcode is not updated. ++ ++ The mitigation is enabled on a best effort basis. See :ref:`vmwerv` ++ * - 'Mitigation: Clear CPU buffers' ++ - The processor is vulnerable and the CPU buffer clearing mitigation is ++ enabled. + + If the processor is vulnerable then the following information is appended + to the above information: +-- +2.21.0 + diff --git a/queue-5.0/0027-x86-speculation-mds-Fix-documentation-typo.patch b/queue-5.0/0027-x86-speculation-mds-Fix-documentation-typo.patch new file mode 100644 index 00000000000..3a550e1829f --- /dev/null +++ b/queue-5.0/0027-x86-speculation-mds-Fix-documentation-typo.patch @@ -0,0 +1,33 @@ +From 91efa25fdb1c52eed467f3e3a3363900e9726758 Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Tue, 7 May 2019 15:05:22 -0500 +Subject: [PATCH 27/27] x86/speculation/mds: Fix documentation typo + +commit 95310e348a321b45fb746c176961d4da72344282 upstream + +Fix a minor typo in the MDS documentation: "eanbled" -> "enabled". + +Reported-by: Jeff Bastian +Signed-off-by: Josh Poimboeuf +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/x86/mds.rst | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst +index 979945be257a..534e9baa4e1d 100644 +--- a/Documentation/x86/mds.rst ++++ b/Documentation/x86/mds.rst +@@ -116,7 +116,7 @@ Kernel internal mitigation modes + off Mitigation is disabled. Either the CPU is not affected or + mds=off is supplied on the kernel command line + +- full Mitigation is eanbled. CPU is affected and MD_CLEAR is ++ full Mitigation is enabled. CPU is affected and MD_CLEAR is + advertised in CPUID. + + vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not +-- +2.21.0 +
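
For reference: the cpu_mitigations_off() and cpu_mitigations_auto_nosmt()
helpers that the hunks above gate on are provided by
0020-cpu-speculation-Add-mitigations-cmdline-option.patch earlier in this
series. A simplified sketch of that generic 'mitigations=' parser is shown
below; it is paraphrased from memory of the upstream change rather than
quoted from this queue, so treat the exact file split (include/linux/cpu.h
vs. kernel/cpu.c) and declarations as approximate.

  #include <linux/cache.h>   /* __ro_after_init */
  #include <linux/init.h>    /* early_param()   */
  #include <linux/string.h>  /* strcmp()        */

  /* The three modes selectable via the 'mitigations=' cmdline option. */
  enum cpu_mitigations {
          CPU_MITIGATIONS_OFF,
          CPU_MITIGATIONS_AUTO,
          CPU_MITIGATIONS_AUTO_NOSMT,
  };

  static enum cpu_mitigations cpu_mitigations __ro_after_init =
          CPU_MITIGATIONS_AUTO;

  static int __init mitigations_parse_cmdline(char *arg)
  {
          if (!strcmp(arg, "off"))
                  cpu_mitigations = CPU_MITIGATIONS_OFF;
          else if (!strcmp(arg, "auto"))
                  cpu_mitigations = CPU_MITIGATIONS_AUTO;
          else if (!strcmp(arg, "auto,nosmt"))
                  cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;

          return 0;
  }
  early_param("mitigations", mitigations_parse_cmdline);

  /* mitigations=off */
  bool cpu_mitigations_off(void)
  {
          return cpu_mitigations == CPU_MITIGATIONS_OFF;
  }

  /* mitigations=auto,nosmt */
  bool cpu_mitigations_auto_nosmt(void)
  {
          return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
  }

Arch-specific code then keys each individual mitigation off these helpers,
exactly as the hunks in this series do, e.g. the pti.c change above:

  if (cmdline_find_option_bool(boot_command_line, "nopti") ||
      cpu_mitigations_off()) {
          pti_mode = PTI_FORCE_OFF;
          pti_print_if_insecure("disabled on command line.");
          return;
  }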