]> git.ipfire.org Git - people/arne_f/kernel.git/blame - arch/powerpc/kernel/security.c
powerpc/fsl: Fix spectre_v2 mitigations reporting
[people/arne_f/kernel.git] / arch / powerpc / kernel / security.c
CommitLineData
62dfddfa
ME
1// SPDX-License-Identifier: GPL-2.0+
2//
3// Security related flags and so on.
4//
5// Copyright 2018, Michael Ellerman, IBM Corporation.
6
7#include <linux/kernel.h>
e9b911a9 8#include <linux/debugfs.h>
6f81254e 9#include <linux/device.h>
a8f6001c 10#include <linux/seq_buf.h>
6f81254e 11
98f6dedb
ME
12#include <asm/asm-prototypes.h>
13#include <asm/code-patching.h>
e1217b4a 14#include <asm/debug.h>
62dfddfa 15#include <asm/security_features.h>
f3286f1a 16#include <asm/setup.h>
62dfddfa
ME
17
18
/* Bitmask of SEC_FTR_* flags currently in effect; starts at the default set. */
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

/*
 * How the branch-predictor count cache is flushed, as selected by
 * toggle_count_cache_flush() below.
 */
enum count_cache_flush_type {
	COUNT_CACHE_FLUSH_NONE	= 0x1,
	COUNT_CACHE_FLUSH_SW	= 0x2,
	COUNT_CACHE_FLUSH_HW	= 0x4,
};
static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;

/* True once the Spectre-v1 speculation-barrier fixups have been patched in. */
bool barrier_nospec_enabled;
/* Set by the "nospectre_v1" command-line parameter to keep the barrier off. */
static bool no_nospec;
f3286f1a
MS
30
/*
 * Record the new barrier state, then patch the barrier_nospec fixup
 * sites in the kernel text to match it.
 */
static void enable_barrier_nospec(bool enable)
{
	barrier_nospec_enabled = enable;
	do_barrier_nospec_fixups(enable);
}
36
e1217b4a
MS
/*
 * Boot-time decision on whether to enable the Spectre-v1 speculation
 * barrier, honouring the "nospectre_v1" command-line override.
 */
void setup_barrier_nospec(void)
{
	bool enable;

	/*
	 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
	 * But there's a good reason not to. The two flags we check below are
	 * both are enabled by default in the kernel, so if the hcall is not
	 * functional they will be enabled.
	 * On a system where the host firmware has been updated (so the ori
	 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
	 * not been updated, we would like to enable the barrier. Dropping the
	 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
	 * we potentially enable the barrier on systems where the host firmware
	 * is not updated, but that's harmless as it's a no-op.
	 */
	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
		 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);

	if (!no_nospec)
		enable_barrier_nospec(enable);
}
59
78749d1a
DC
/* "nospectre_v1": the user asked us never to enable the barrier. */
static int __init handle_nospectre_v1(char *p)
{
	no_nospec = true;

	return 0;
}
early_param("nospectre_v1", handle_nospectre_v1);
67
e1217b4a
MS
68#ifdef CONFIG_DEBUG_FS
69static int barrier_nospec_set(void *data, u64 val)
70{
71 switch (val) {
72 case 0:
73 case 1:
74 break;
75 default:
76 return -EINVAL;
77 }
78
79 if (!!val == !!barrier_nospec_enabled)
80 return 0;
81
82 enable_barrier_nospec(!!val);
83
84 return 0;
85}
86
87static int barrier_nospec_get(void *data, u64 *val)
88{
89 *val = barrier_nospec_enabled ? 1 : 0;
90 return 0;
91}
92
DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

/* Expose a "barrier_nospec" runtime on/off switch under powerpc_debugfs_root. */
static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
104
#ifdef CONFIG_PPC_BOOK3S_64
/* sysfs handler: report the Meltdown mitigation state for this CPU. */
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	bool thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);

	if (rfi_flush || thread_priv) {
		struct seq_buf sb;

		seq_buf_init(&sb, buf, PAGE_SIZE - 1);
		seq_buf_printf(&sb, "Mitigation: ");

		if (rfi_flush)
			seq_buf_printf(&sb, "RFI Flush");

		/* Separator only when both mitigations are listed. */
		if (rfi_flush && thread_priv)
			seq_buf_printf(&sb, ", ");

		if (thread_priv)
			seq_buf_printf(&sb, "L1D private per thread");

		seq_buf_printf(&sb, "\n");

		return sb.len;
	}

	/* Neither privilege transition requires an L1D flush: not affected. */
	if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
	    !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
		return sprintf(buf, "Not affected\n");

	return sprintf(buf, "Vulnerable\n");
}
#endif
ed50e032
ME
139
140ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
141{
ee617f58
ME
142 struct seq_buf s;
143
144 seq_buf_init(&s, buf, PAGE_SIZE - 1);
ed50e032 145
ee617f58
ME
146 if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
147 if (barrier_nospec_enabled)
148 seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
149 else
150 seq_buf_printf(&s, "Vulnerable");
f7493c98 151
ee617f58
ME
152 if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
153 seq_buf_printf(&s, ", ori31 speculation barrier enabled");
154
155 seq_buf_printf(&s, "\n");
156 } else
157 seq_buf_printf(&s, "Not affected\n");
158
159 return s.len;
ed50e032 160}
1dc0f1f1
ME
161
162ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
163{
1dc0f1f1 164 struct seq_buf s;
ee617f58 165 bool bcs, ccd;
1dc0f1f1
ME
166
167 seq_buf_init(&s, buf, PAGE_SIZE - 1);
168
169 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
170 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
1dc0f1f1 171
98f6dedb
ME
172 if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
173 bool comma = false;
1dc0f1f1
ME
174 seq_buf_printf(&s, "Mitigation: ");
175
98f6dedb 176 if (bcs) {
1dc0f1f1 177 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
98f6dedb
ME
178 comma = true;
179 }
180
181 if (ccd) {
182 if (comma)
183 seq_buf_printf(&s, ", ");
184 seq_buf_printf(&s, "Indirect branch cache disabled");
185 comma = true;
186 }
1dc0f1f1 187
98f6dedb 188 if (comma)
1dc0f1f1
ME
189 seq_buf_printf(&s, ", ");
190
98f6dedb
ME
191 seq_buf_printf(&s, "Software count cache flush");
192
193 if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
194 seq_buf_printf(&s, "(hardware accelerated)");
1dc0f1f1
ME
195 } else
196 seq_buf_printf(&s, "Vulnerable");
197
1dc0f1f1
ME
198 seq_buf_printf(&s, "\n");
199
200 return s.len;
201}
e9b911a9 202
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Store-forwarding barrier support.
 */

/* Barrier flavour chosen in setup_stf_barrier(). */
static enum stf_barrier_type stf_enabled_flush_types;
/* Set by "no_stf_barrier" or "spec_store_bypass_disable=off". */
static bool no_stf_barrier;
/* True while the entry/exit barrier is patched in. */
bool stf_barrier;

static int __init handle_no_stf_barrier(char *p)
{
	pr_info("stf-barrier: disabled on command line.");
	no_stf_barrier = true;
	return 0;
}

early_param("no_stf_barrier", handle_no_stf_barrier);
220
221/* This is the generic flag used by other architectures */
222static int __init handle_ssbd(char *p)
223{
224 if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) {
225 /* Until firmware tells us, we have the barrier with auto */
226 return 0;
227 } else if (strncmp(p, "off", 3) == 0) {
228 handle_no_stf_barrier(NULL);
229 return 0;
230 } else
231 return 1;
232
233 return 0;
234}
235early_param("spec_store_bypass_disable", handle_ssbd);
236
/* This is the generic flag used by other architectures */
static int __init handle_no_ssbd(char *p)
{
	/* Equivalent to "no_stf_barrier": disable the mitigation entirely. */
	handle_no_stf_barrier(NULL);
	return 0;
}
early_param("nospec_store_bypass_disable", handle_no_ssbd);
244
245static void stf_barrier_enable(bool enable)
246{
247 if (enable)
248 do_stf_barrier_fixups(stf_enabled_flush_types);
249 else
250 do_stf_barrier_fixups(STF_BARRIER_NONE);
251
252 stf_barrier = enable;
253}
254
255void setup_stf_barrier(void)
256{
257 enum stf_barrier_type type;
258 bool enable, hv;
259
260 hv = cpu_has_feature(CPU_FTR_HVMODE);
261
262 /* Default to fallback in case fw-features are not available */
263 if (cpu_has_feature(CPU_FTR_ARCH_300))
264 type = STF_BARRIER_EIEIO;
265 else if (cpu_has_feature(CPU_FTR_ARCH_207S))
266 type = STF_BARRIER_SYNC_ORI;
267 else if (cpu_has_feature(CPU_FTR_ARCH_206))
268 type = STF_BARRIER_FALLBACK;
269 else
270 type = STF_BARRIER_NONE;
271
272 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
273 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
274 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
275
276 if (type == STF_BARRIER_FALLBACK) {
277 pr_info("stf-barrier: fallback barrier available\n");
278 } else if (type == STF_BARRIER_SYNC_ORI) {
279 pr_info("stf-barrier: hwsync barrier available\n");
280 } else if (type == STF_BARRIER_EIEIO) {
281 pr_info("stf-barrier: eieio barrier available\n");
282 }
283
284 stf_enabled_flush_types = type;
285
286 if (!no_stf_barrier)
287 stf_barrier_enable(enable);
288}
289
290ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
291{
292 if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) {
293 const char *type;
294 switch (stf_enabled_flush_types) {
295 case STF_BARRIER_EIEIO:
296 type = "eieio";
297 break;
298 case STF_BARRIER_SYNC_ORI:
299 type = "hwsync";
300 break;
301 case STF_BARRIER_FALLBACK:
302 type = "fallback";
303 break;
304 default:
305 type = "unknown";
306 }
307 return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type);
308 }
309
310 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
311 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
312 return sprintf(buf, "Not affected\n");
313
314 return sprintf(buf, "Vulnerable\n");
315}
316
317#ifdef CONFIG_DEBUG_FS
318static int stf_barrier_set(void *data, u64 val)
319{
320 bool enable;
321
322 if (val == 1)
323 enable = true;
324 else if (val == 0)
325 enable = false;
326 else
327 return -EINVAL;
328
329 /* Only do anything if we're changing state */
330 if (enable != stf_barrier)
331 stf_barrier_enable(enable);
332
333 return 0;
334}
335
336static int stf_barrier_get(void *data, u64 *val)
337{
338 *val = stf_barrier ? 1 : 0;
339 return 0;
340}
341
DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

/* Expose a "stf_barrier" runtime on/off switch under powerpc_debugfs_root. */
static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
98f6dedb
ME
351
/*
 * Enable or disable the software count-cache flush by live-patching the
 * call site, and record the resulting mode in count_cache_flush_type.
 * Statement order matters: the flag is updated only after the text patch.
 */
static void toggle_count_cache_flush(bool enable)
{
	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
		/* Disabled: nop out the call so no flush ever runs. */
		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
		pr_info("count-cache-flush: software flush disabled.\n");
		return;
	}

	/* Patch in a branch-and-link to the flush sequence. */
	patch_branch_site(&patch__call_flush_count_cache,
			  (u64)&flush_count_cache, BRANCH_SET_LINK);

	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
		pr_info("count-cache-flush: full software flush sequence enabled.\n");
		return;
	}

	/*
	 * Hardware assist available: patch a blr so the software sequence
	 * returns early and the hardware does the rest.
	 */
	patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
	count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
	pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
}
374
/* Boot-time entry point: enable the count-cache flush if firmware wants it. */
void setup_count_cache_flush(void)
{
	toggle_count_cache_flush(true);
}
379
380#ifdef CONFIG_DEBUG_FS
381static int count_cache_flush_set(void *data, u64 val)
382{
383 bool enable;
384
385 if (val == 1)
386 enable = true;
387 else if (val == 0)
388 enable = false;
389 else
390 return -EINVAL;
391
392 toggle_count_cache_flush(enable);
393
394 return 0;
395}
396
397static int count_cache_flush_get(void *data, u64 *val)
398{
399 if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
400 *val = 0;
401 else
402 *val = 1;
403
404 return 0;
405}
406
DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
			count_cache_flush_set, "%llu\n");

/* Expose a "count_cache_flush" runtime on/off switch under powerpc_debugfs_root. */
static __init int count_cache_flush_debugfs_init(void)
{
	debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
			    NULL, &fops_count_cache_flush);
	return 0;
}
device_initcall(count_cache_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */