From cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 Mon Sep 17 00:00:00 2001
From: Andi Kleen <ak@linux.intel.com>
Date: Fri, 24 Aug 2018 10:03:50 -0700
Subject: x86/speculation/l1tf: Increase l1tf memory limit for Nehalem+

From: Andi Kleen <ak@linux.intel.com>

commit cc51e5428ea54f575d49cfcede1d4cb3a72b4ec4 upstream.

On Nehalem and newer core CPUs the CPU cache internally uses 44 bits
physical address space. The L1TF workaround is limited by this internal
cache address width, and needs to have one bit free there for the
mitigation to work.

Older client systems report only 36bit physical address space so the range
check decides that L1TF is not mitigated for a 36bit phys/32GB system with
some memory holes.

But since these actually have the larger internal cache width this warning
is bogus because it would only really be needed if the system had more than
43bits of memory.
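
As a rough sanity check of those numbers: the warning fires once RAM reaches
half of the usable address space, i.e. 2^(bits - 1) bytes. The small
userspace sketch below (an illustration only, not part of the kernel change;
it assumes the x86 PAGE_SHIFT of 12) recomputes that threshold with the same
formula as l1tf_pfn_limit() for the reported 36 bits and for the 44 bits the
cache actually implements.

/*
 * Sketch: recompute the L1TF "MAX_PA/2" threshold for 36 vs. 44 address
 * bits, mirroring l1tf_pfn_limit() and the half_pa check in bugs.c.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* 4K pages, as on x86 */
#define BIT_ULL(n)	(1ULL << (n))

/* Same formula as the kernel helper, parameterized on the address width */
static uint64_t l1tf_pfn_limit(unsigned int address_bits)
{
	return BIT_ULL(address_bits - 1 - PAGE_SHIFT);
}

int main(void)
{
	unsigned int bits[] = { 36, 44 };

	for (int i = 0; i < 2; i++) {
		uint64_t half_pa = l1tf_pfn_limit(bits[i]) << PAGE_SHIFT;

		printf("%2u address bits: warning once RAM reaches %llu GB\n",
		       bits[i], (unsigned long long)(half_pa >> 30));
	}
	return 0;	/* prints 32 GB for 36 bits, 8192 GB for 44 bits */
}

With 36 bits the cut-off is 32GB, so a 32GB machine whose memory holes push
some RAM above the 32GB physical mark trips the check; with the real 44 bit
width the cut-off is 8TB and the warning goes away.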

Add a new internal x86_cache_bits field. Normally it is the same as the
physical bits field reported by CPUID, but for Nehalem and newer force it to
be at least 44bits.

Change the L1TF memory size warning to use the new cache_bits field to
avoid bogus warnings and remove the bogus comment about memory size.

Fixes: 17dbca119312 ("x86/speculation/l1tf: Add sysfs reporting for l1tf")
Reported-by: George Anchev <studio@anchev.net>
Reported-by: Christopher Snowhill <kode54@gmail.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: Michael Hocko <mhocko@suse.com>
Cc: vbabka@suse.cz
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180824170351.34874-1-andi@firstfloor.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/include/asm/processor.h |    4 ++-
 arch/x86/kernel/cpu/bugs.c       |   46 ++++++++++++++++++++++++++++++++++-----
 arch/x86/kernel/cpu/common.c     |    1
 3 files changed, 45 insertions(+), 6 deletions(-)

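Since the fix hinges on the gap between the CPUID-reported address width and
the real cache width, it can be useful to see what a given machine actually
reports. The reported value comes from CPUID leaf 0x80000008 (EAX[7:0]),
which is what get_cpu_address_sizes() below consumes. A minimal userspace
sketch, assuming a GCC/clang toolchain with <cpuid.h> (again an illustration,
not part of the patch):

/*
 * Sketch: print the CPUID-reported physical address bits that
 * get_cpu_address_sizes() uses as the default for x86_cache_bits.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x80000008 not available\n");
		return 1;
	}

	/* Affected client parts report 36 here although the cache uses 44 bits */
	printf("CPUID physical address bits: %u\n", eax & 0xff);
	return 0;
}

On the affected Nehalem and newer client parts this prints 36 even though the
cache internally uses 44 bits, which is exactly the discrepancy that
override_cache_bits() corrects.
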
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
+	/* Address space bits used by the cache internally */
+	u8			x86_cache_bits;
 	unsigned		initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x8
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+	if (c->x86 != 6)
+		return;
+
+	switch (c->x86_model) {
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
+		if (c->x86_cache_bits < 44)
+			c->x86_cache_bits = 44;
+		break;
+	}
+}
+
 static void __init l1tf_select_mitigation(void)
 {
 	u64 half_pa;
@@ -659,6 +698,8 @@ static void __init l1tf_select_mitigatio
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return;
 
+	override_cache_bits(&boot_cpu_data);
+
 	switch (l1tf_mitigation) {
 	case L1TF_MITIGATION_OFF:
 	case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -678,11 +719,6 @@ static void __init l1tf_select_mitigatio
 	return;
 #endif
 
-	/*
-	 * This is extremely unlikely to happen because almost all
-	 * systems have far more MAX_PA/2 than RAM can be fit into
-	 * DIMM slots.
-	 */
 	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
 	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
 		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinf
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
 		c->x86_phys_bits = 36;
 #endif
+	c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)