git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-4.4/x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
4.4-stable patches
[thirdparty/kernel/stable-queue.git] / queue-4.4 / x86-speculation-enable-cross-hyperthread-spectre-v2-stibp-mitigation.patch
1 From foo@baz Tue 14 May 2019 08:29:35 PM CEST
2 From: Jiri Kosina <jkosina@suse.cz>
3 Date: Tue, 25 Sep 2018 14:38:55 +0200
4 Subject: x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation
5
6 From: Jiri Kosina <jkosina@suse.cz>
7
8 commit 53c613fe6349994f023245519265999eed75957f upstream.
9
10 STIBP is a feature provided by certain Intel ucodes / CPUs. This feature
11 (once enabled) prevents cross-hyperthread control of decisions made by
12 indirect branch predictors.
13
14 Enable this feature if
15
16 - the CPU is vulnerable to spectre v2
17 - the CPU supports SMT and has SMT siblings online
18 - spectre_v2 mitigation autoselection is enabled (default)
19
20 After some previous discussion, this leaves STIBP on all the time, as wrmsr
21 on crossing kernel boundary is a no-no. This could perhaps later be a bit
22 more optimized (like disabling it in NOHZ, experiment with disabling it in
23 idle, etc) if needed.
24
25 Note that the synchronization of the mask manipulation via newly added
26 spec_ctrl_mutex is currently not strictly needed, as the only updater is
27 already being serialized by cpu_add_remove_lock, but let's make this a
28 little bit more future-proof.
29
30 Signed-off-by: Jiri Kosina <jkosina@suse.cz>
31 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
32 Cc: Peter Zijlstra <peterz@infradead.org>
33 Cc: Josh Poimboeuf <jpoimboe@redhat.com>
34 Cc: Andrea Arcangeli <aarcange@redhat.com>
35 Cc: David Woodhouse <dwmw@amazon.co.uk>
36 Cc: Andi Kleen <ak@linux.intel.com>
37 Cc: Tim Chen <tim.c.chen@linux.intel.com>
38 Cc: Casey Schaufler <casey.schaufler@intel.com>
39 Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438240.15880@cbobk.fhfr.pm
40 [bwh: Backported to 4.4:
41 - Don't add any calls to arch_smt_update() yet. They will be introduced by
42 "x86/speculation: Rework SMT state change".
43 - Use IS_ENABLED(CONFIG_SMP) instead of cpu_smt_control for now. This
44 will be fixed by "x86/speculation: Rework SMT state change".]
45 Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
46 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
47 ---
48 arch/x86/kernel/cpu/bugs.c | 55 ++++++++++++++++++++++++++++++++++++++++-----
49 1 file changed, 50 insertions(+), 5 deletions(-)
50
51 --- a/arch/x86/kernel/cpu/bugs.c
52 +++ b/arch/x86/kernel/cpu/bugs.c
53 @@ -32,12 +32,10 @@ static void __init spectre_v2_select_mit
54 static void __init ssb_select_mitigation(void);
55 static void __init l1tf_select_mitigation(void);
56
57 -/*
58 - * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
59 - * writes to SPEC_CTRL contain whatever reserved bits have been set.
60 - */
61 +/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
62 u64 x86_spec_ctrl_base;
63 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
64 +static DEFINE_MUTEX(spec_ctrl_mutex);
65
66 /*
67 * The vendor and possibly platform specific bits which can be modified in
68 @@ -315,6 +313,46 @@ static enum spectre_v2_mitigation_cmd __
69 return cmd;
70 }
71
72 +static bool stibp_needed(void)
73 +{
74 + if (spectre_v2_enabled == SPECTRE_V2_NONE)
75 + return false;
76 +
77 + if (!boot_cpu_has(X86_FEATURE_STIBP))
78 + return false;
79 +
80 + return true;
81 +}
82 +
83 +static void update_stibp_msr(void *info)
84 +{
85 + wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
86 +}
87 +
88 +void arch_smt_update(void)
89 +{
90 + u64 mask;
91 +
92 + if (!stibp_needed())
93 + return;
94 +
95 + mutex_lock(&spec_ctrl_mutex);
96 + mask = x86_spec_ctrl_base;
97 + if (IS_ENABLED(CONFIG_SMP))
98 + mask |= SPEC_CTRL_STIBP;
99 + else
100 + mask &= ~SPEC_CTRL_STIBP;
101 +
102 + if (mask != x86_spec_ctrl_base) {
103 + pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
104 + IS_ENABLED(CONFIG_SMP) ?
105 + "Enabling" : "Disabling");
106 + x86_spec_ctrl_base = mask;
107 + on_each_cpu(update_stibp_msr, NULL, 1);
108 + }
109 + mutex_unlock(&spec_ctrl_mutex);
110 +}
111 +
112 static void __init spectre_v2_select_mitigation(void)
113 {
114 enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
115 @@ -414,6 +452,9 @@ specv2_set_mode:
116 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
117 pr_info("Enabling Restricted Speculation for firmware calls\n");
118 }
119 +
120 + /* Enable STIBP if appropriate */
121 + arch_smt_update();
122 }
123
124 #undef pr_fmt
125 @@ -722,6 +763,8 @@ static void __init l1tf_select_mitigatio
126 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
127 char *buf, unsigned int bug)
128 {
129 + int ret;
130 +
131 if (!boot_cpu_has_bug(bug))
132 return sprintf(buf, "Not affected\n");
133
134 @@ -736,10 +779,12 @@ static ssize_t cpu_show_common(struct de
135 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
136
137 case X86_BUG_SPECTRE_V2:
138 - return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
139 + ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
140 boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
141 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
142 + (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
143 spectre_v2_module_string());
144 + return ret;
145
146 case X86_BUG_SPEC_STORE_BYPASS:
147 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);