]>
Commit | Line | Data |
---|---|---|
e2ba26db ME |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | // | |
3 | // Security related flags and so on. | |
4 | // | |
5 | // Copyright 2018, Michael Ellerman, IBM Corporation. | |
6 | ||
7 | #include <linux/kernel.h> | |
ae8afdf6 | 8 | #include <linux/device.h> |
d71a3e0a | 9 | #include <linux/seq_buf.h> |
ae8afdf6 | 10 | |
3084902a | 11 | #include <asm/debugfs.h> |
e2ba26db | 12 | #include <asm/security_features.h> |
f1a6390a | 13 | #include <asm/setup.h> |
e2ba26db ME |
14 | |
15 | ||
/*
 * Bitmask of enabled SEC_FTR_* security features. Initialised to
 * SEC_FTR_DEFAULT and consulted via security_ftr_enabled() throughout
 * this file to decide which mitigations to report/enable.
 */
unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;

/* True once the barrier_nospec speculation-barrier fixups are patched in. */
bool barrier_nospec_enabled;
f1a6390a MS |
19 | |
20 | static void enable_barrier_nospec(bool enable) | |
21 | { | |
22 | barrier_nospec_enabled = enable; | |
23 | do_barrier_nospec_fixups(enable); | |
24 | } | |
25 | ||
ae082ed4 MS |
26 | void setup_barrier_nospec(void) |
27 | { | |
28 | bool enable; | |
29 | ||
30 | /* | |
31 | * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. | |
32 | * But there's a good reason not to. The two flags we check below are | |
33 | * both are enabled by default in the kernel, so if the hcall is not | |
34 | * functional they will be enabled. | |
35 | * On a system where the host firmware has been updated (so the ori | |
36 | * functions as a barrier), but on which the hypervisor (KVM/Qemu) has | |
37 | * not been updated, we would like to enable the barrier. Dropping the | |
38 | * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is | |
39 | * we potentially enable the barrier on systems where the host firmware | |
40 | * is not updated, but that's harmless as it's a no-op. | |
41 | */ | |
42 | enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && | |
43 | security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); | |
44 | ||
45 | enable_barrier_nospec(enable); | |
46 | } | |
47 | ||
48 | #ifdef CONFIG_DEBUG_FS | |
49 | static int barrier_nospec_set(void *data, u64 val) | |
50 | { | |
51 | switch (val) { | |
52 | case 0: | |
53 | case 1: | |
54 | break; | |
55 | default: | |
56 | return -EINVAL; | |
57 | } | |
58 | ||
59 | if (!!val == !!barrier_nospec_enabled) | |
60 | return 0; | |
61 | ||
62 | enable_barrier_nospec(!!val); | |
63 | ||
64 | return 0; | |
65 | } | |
66 | ||
67 | static int barrier_nospec_get(void *data, u64 *val) | |
68 | { | |
69 | *val = barrier_nospec_enabled ? 1 : 0; | |
70 | return 0; | |
71 | } | |
72 | ||
/* Simple 0/1 attribute backed by the get/set handlers above. */
DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
			barrier_nospec_get, barrier_nospec_set, "%llu\n");

/* Register "barrier_nospec" (root rw) under the powerpc debugfs root. */
static __init int barrier_nospec_debugfs_init(void)
{
	debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
			    &fops_barrier_nospec);
	return 0;
}
device_initcall(barrier_nospec_debugfs_init);
83 | #endif /* CONFIG_DEBUG_FS */ | |
84 | ||
ae8afdf6 ME |
85 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) |
86 | { | |
d71a3e0a ME |
87 | bool thread_priv; |
88 | ||
89 | thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); | |
90 | ||
91 | if (rfi_flush || thread_priv) { | |
92 | struct seq_buf s; | |
93 | seq_buf_init(&s, buf, PAGE_SIZE - 1); | |
94 | ||
95 | seq_buf_printf(&s, "Mitigation: "); | |
96 | ||
97 | if (rfi_flush) | |
98 | seq_buf_printf(&s, "RFI Flush"); | |
99 | ||
100 | if (rfi_flush && thread_priv) | |
101 | seq_buf_printf(&s, ", "); | |
102 | ||
103 | if (thread_priv) | |
104 | seq_buf_printf(&s, "L1D private per thread"); | |
105 | ||
106 | seq_buf_printf(&s, "\n"); | |
107 | ||
108 | return s.len; | |
109 | } | |
110 | ||
111 | if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && | |
112 | !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) | |
113 | return sprintf(buf, "Not affected\n"); | |
ae8afdf6 ME |
114 | |
115 | return sprintf(buf, "Vulnerable\n"); | |
116 | } | |
6e77fead ME |
117 | |
118 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) | |
119 | { | |
120 | if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) | |
121 | return sprintf(buf, "Not affected\n"); | |
122 | ||
123 | return sprintf(buf, "Vulnerable\n"); | |
124 | } | |
046e9ada ME |
125 | |
126 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) | |
127 | { | |
128 | bool bcs, ccd, ori; | |
129 | struct seq_buf s; | |
130 | ||
131 | seq_buf_init(&s, buf, PAGE_SIZE - 1); | |
132 | ||
133 | bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); | |
134 | ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); | |
135 | ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31); | |
136 | ||
137 | if (bcs || ccd) { | |
138 | seq_buf_printf(&s, "Mitigation: "); | |
139 | ||
140 | if (bcs) | |
141 | seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); | |
142 | ||
143 | if (bcs && ccd) | |
144 | seq_buf_printf(&s, ", "); | |
145 | ||
146 | if (ccd) | |
147 | seq_buf_printf(&s, "Indirect branch cache disabled"); | |
148 | } else | |
149 | seq_buf_printf(&s, "Vulnerable"); | |
150 | ||
151 | if (ori) | |
152 | seq_buf_printf(&s, ", ori31 speculation barrier enabled"); | |
153 | ||
154 | seq_buf_printf(&s, "\n"); | |
155 | ||
156 | return s.len; | |
157 | } | |
3084902a NP |
158 | |
/*
 * Store-forwarding barrier support.
 */

/* Which flavour of store-forwarding barrier was selected at boot. */
static enum stf_barrier_type stf_enabled_flush_types;
/* Set by the "no_stf_barrier" (or *ssbd "off"/no*) command line options. */
static bool no_stf_barrier;
/* True while the store-forwarding barrier fixups are patched in. */
bool stf_barrier;
166 | ||
167 | static int __init handle_no_stf_barrier(char *p) | |
168 | { | |
169 | pr_info("stf-barrier: disabled on command line."); | |
170 | no_stf_barrier = true; | |
171 | return 0; | |
172 | } | |
173 | ||
174 | early_param("no_stf_barrier", handle_no_stf_barrier); | |
175 | ||
176 | /* This is the generic flag used by other architectures */ | |
177 | static int __init handle_ssbd(char *p) | |
178 | { | |
179 | if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { | |
180 | /* Until firmware tells us, we have the barrier with auto */ | |
181 | return 0; | |
182 | } else if (strncmp(p, "off", 3) == 0) { | |
183 | handle_no_stf_barrier(NULL); | |
184 | return 0; | |
185 | } else | |
186 | return 1; | |
187 | ||
188 | return 0; | |
189 | } | |
190 | early_param("spec_store_bypass_disable", handle_ssbd); | |
191 | ||
192 | /* This is the generic flag used by other architectures */ | |
193 | static int __init handle_no_ssbd(char *p) | |
194 | { | |
195 | handle_no_stf_barrier(NULL); | |
196 | return 0; | |
197 | } | |
198 | early_param("nospec_store_bypass_disable", handle_no_ssbd); | |
199 | ||
200 | static void stf_barrier_enable(bool enable) | |
201 | { | |
202 | if (enable) | |
203 | do_stf_barrier_fixups(stf_enabled_flush_types); | |
204 | else | |
205 | do_stf_barrier_fixups(STF_BARRIER_NONE); | |
206 | ||
207 | stf_barrier = enable; | |
208 | } | |
209 | ||
210 | void setup_stf_barrier(void) | |
211 | { | |
212 | enum stf_barrier_type type; | |
213 | bool enable, hv; | |
214 | ||
215 | hv = cpu_has_feature(CPU_FTR_HVMODE); | |
216 | ||
217 | /* Default to fallback in case fw-features are not available */ | |
218 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
219 | type = STF_BARRIER_EIEIO; | |
220 | else if (cpu_has_feature(CPU_FTR_ARCH_207S)) | |
221 | type = STF_BARRIER_SYNC_ORI; | |
222 | else if (cpu_has_feature(CPU_FTR_ARCH_206)) | |
223 | type = STF_BARRIER_FALLBACK; | |
224 | else | |
225 | type = STF_BARRIER_NONE; | |
226 | ||
227 | enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && | |
228 | (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || | |
229 | (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); | |
230 | ||
231 | if (type == STF_BARRIER_FALLBACK) { | |
232 | pr_info("stf-barrier: fallback barrier available\n"); | |
233 | } else if (type == STF_BARRIER_SYNC_ORI) { | |
234 | pr_info("stf-barrier: hwsync barrier available\n"); | |
235 | } else if (type == STF_BARRIER_EIEIO) { | |
236 | pr_info("stf-barrier: eieio barrier available\n"); | |
237 | } | |
238 | ||
239 | stf_enabled_flush_types = type; | |
240 | ||
241 | if (!no_stf_barrier) | |
242 | stf_barrier_enable(enable); | |
243 | } | |
244 | ||
245 | ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) | |
246 | { | |
247 | if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { | |
248 | const char *type; | |
249 | switch (stf_enabled_flush_types) { | |
250 | case STF_BARRIER_EIEIO: | |
251 | type = "eieio"; | |
252 | break; | |
253 | case STF_BARRIER_SYNC_ORI: | |
254 | type = "hwsync"; | |
255 | break; | |
256 | case STF_BARRIER_FALLBACK: | |
257 | type = "fallback"; | |
258 | break; | |
259 | default: | |
260 | type = "unknown"; | |
261 | } | |
262 | return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); | |
263 | } | |
264 | ||
265 | if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && | |
266 | !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) | |
267 | return sprintf(buf, "Not affected\n"); | |
268 | ||
269 | return sprintf(buf, "Vulnerable\n"); | |
270 | } | |
271 | ||
272 | #ifdef CONFIG_DEBUG_FS | |
273 | static int stf_barrier_set(void *data, u64 val) | |
274 | { | |
275 | bool enable; | |
276 | ||
277 | if (val == 1) | |
278 | enable = true; | |
279 | else if (val == 0) | |
280 | enable = false; | |
281 | else | |
282 | return -EINVAL; | |
283 | ||
284 | /* Only do anything if we're changing state */ | |
285 | if (enable != stf_barrier) | |
286 | stf_barrier_enable(enable); | |
287 | ||
288 | return 0; | |
289 | } | |
290 | ||
291 | static int stf_barrier_get(void *data, u64 *val) | |
292 | { | |
293 | *val = stf_barrier ? 1 : 0; | |
294 | return 0; | |
295 | } | |
296 | ||
/* Simple 0/1 attribute backed by the get/set handlers above. */
DEFINE_SIMPLE_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, "%llu\n");

/* Register "stf_barrier" (root rw) under the powerpc debugfs root. */
static __init int stf_barrier_debugfs_init(void)
{
	debugfs_create_file("stf_barrier", 0600, powerpc_debugfs_root, NULL, &fops_stf_barrier);
	return 0;
}
device_initcall(stf_barrier_debugfs_init);
305 | #endif /* CONFIG_DEBUG_FS */ |