From 8d09dc7fb3e6fce051a3a62c351ed5e40a64b81d Mon Sep 17 00:00:00 2001
From: Andi Kleen <ak@linux.intel.com>
Date: Fri, 18 Jan 2019 16:50:16 -0800
Subject: [PATCH 52/76] x86/speculation/mds: Add basic bug infrastructure for
 MDS

commit ed5194c2732c8084af9fd159c146ea92bf137128 upstream.

Microarchitectural Data Sampling (MDS) is a class of side channel attacks
on internal buffers in Intel CPUs. The variants are:

 - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
 - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
 - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)

MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
dependent load (store-to-load forwarding) as an optimization. The forward
can also happen to a faulting or assisting load operation for a different
memory address, which can be exploited under certain conditions. Store
buffers are partitioned between Hyper-Threads so cross thread forwarding is
not possible. But if a thread enters or exits a sleep state the store
buffer is repartitioned which can expose data from one thread to the other.

MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
L1 miss situations and to hold data which is returned or sent in response
to a memory or I/O operation. Fill buffers can forward data to a load
operation and also write data to the cache. When the fill buffer is
deallocated it can retain the stale data of the preceding operations which
can then be forwarded to a faulting or assisting load operation, which can
be exploited under certain conditions. Fill buffers are shared between
Hyper-Threads so cross thread leakage is possible.

MLPDS leaks Load Port Data. Load ports are used to perform load operations
from memory or I/O. The received data is then forwarded to the register
file or a subsequent operation. In some implementations the Load Port can
contain stale data from a previous operation which can be forwarded to
faulting or assisting loads under certain conditions, which again can be
exploited eventually. Load ports are shared between Hyper-Threads so cross
thread leakage is possible.

All variants have the same mitigation for the single CPU thread case (SMT
off), so the kernel can treat them as one MDS issue.

Add the basic infrastructure to detect if the current CPU is affected by
MDS.
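
For orientation, the detection added here boils down to the check sketched
below (illustration only, not part of the diff; the ia32_cap read is
paraphrased from the existing cpu_set_bug_bits() in this tree):

	u64 ia32_cap = 0;

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	/* Affected unless whitelisted (NO_MDS) or the CPU reports MDS_NO. */
	if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO))
		setup_force_cpu_bug(X86_BUG_MDS);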

[ tglx: Rewrote changelog ]

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Jon Masters <jcm@redhat.com>
Tested-by: Jon Masters <jcm@redhat.com>
[bwh: Backported to 4.9: adjust context, indentation]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/cpufeatures.h |  2 ++
 arch/x86/include/asm/msr-index.h   |  5 +++++
 arch/x86/kernel/cpu/common.c       | 23 +++++++++++++++--------
 3 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 234d74186046..c7510dbdf238 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -317,6 +317,7 @@
 #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -354,5 +355,6 @@
 #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index dc582c10586c..38f94d07920d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -72,6 +72,11 @@
 * attack, so no Speculative Store Bypass
 * control required.
 */
+#define ARCH_CAP_MDS_NO BIT(5) /*
+ * Not susceptible to
+ * Microarchitectural Data
+ * Sampling (MDS) vulnerabilities.
+ */
 
 #define MSR_IA32_FLUSH_CMD 0x0000010b
 #define L1D_FLUSH BIT(0) /*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e48e2463720c..9513280b84fa 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -895,6 +895,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_MELTDOWN BIT(1)
 #define NO_SSB BIT(2)
 #define NO_L1TF BIT(3)
+#define NO_MDS BIT(4)
 
 #define VULNWL(_vendor, _family, _model, _whitelist) \
 { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -911,6 +912,7 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
 VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
 
+ /* Intel Family 6 */
 VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
 VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
 VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
@@ -927,17 +929,19 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 VULNWL_INTEL(CORE_YONAH, NO_SSB),
 
 VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_L1TF),
 
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
+
+ /* AMD Family 0xf - 0x12 */
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
 
 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
 {}
 };
 
@@ -968,6 +972,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 if (ia32_cap & ARCH_CAP_IBRS_ALL)
 setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
 
+ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO))
+ setup_force_cpu_bug(X86_BUG_MDS);
+
 if (cpu_matches(NO_MELTDOWN))
 return;
 
-- 
2.21.0
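
Editorial note: this patch only adds detection. Later patches in the series
wire up the VERW-based mitigation and sysfs reporting; a minimal sketch of
how the new bits can be consumed might look like the following (the function
name and messages are illustrative, not taken from this series):

	/* Illustration only: query the bug and feature bits added above. */
	static void mds_report_state(void)
	{
		if (!boot_cpu_has_bug(X86_BUG_MDS)) {
			pr_info("MDS: CPU not affected\n");
			return;
		}

		if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
			pr_info("MDS: VERW-based buffer clearing supported\n");
		else
			pr_info("MDS: no microcode support for buffer clearing\n");
	}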