// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/paravirt.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);
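/*
 * Illustrative sketch (not a call site in this file): a context-switch
 * path that has computed the next task's SPEC_CTRL value publishes it via
 * update_spec_ctrl_cond(), so that with KERNEL_IBRS the actual WRMSR is
 * deferred to the return-to-user write of the MSR. The 'msr' computation
 * below is an assumed example, not the exact upstream expression:
 *
 *	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(ti->flags);
 *	update_spec_ctrl_cond(msr);
 */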
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaulting to disabled.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
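/*
 * Consumer-side sketch for the static keys above (illustrative; the real
 * call sites live in the context-switch and idle code, not here). A
 * disabled static key compiles to a NOP, so the hot path pays nothing
 * until one of the mitigation selections below enables it:
 *
 *	if (static_branch_unlikely(&switch_mm_cond_ibpb))
 *		indirect_branch_prediction_barrier();
 */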
void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * A previously running kernel (kexec) may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();

	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();

	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET or IBPB.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * srso_select_mitigation() depends and must run after
	 * retbleed_select_mitigation().
	 */
	srso_select_mitigation();
	gds_select_mitigation();
}
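/*
 * Dependency summary of the selection order above: spectre_v2 must run
 * before retbleed (IBRS/eIBRS state is reused), retbleed before
 * spectre_v2_user (forced STIBP) and before srso; MDS/TAA/MMIO/RFDS are
 * grouped under md_clear_select_mitigation() because they all share the
 * VERW based CPU buffer clearing.
 */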
/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
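/*
 * Worked example for the TIF translation above: with setguest == true and
 * a guest requesting SSBD (guestval == SPEC_CTRL_SSBD) while the host runs
 * without it (hostval == 0), speculation_ctrl_update() is handed flags
 * with TIF_SSBD set, so the MSR/LS_CFG state follows the guest for the
 * duration of the VMRUN section and is restored on the way back.
 */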
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
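/*
 * Command line usage for the parser above, e.g.:
 *
 *	mds=off | mds=full | mds=full,nosmt
 *
 * "full,nosmt" additionally soft-disables SMT, unless the CPU is only
 * affected by the MSBDS variant (X86_BUG_MSBDS_ONLY).
 */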
#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
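/*
 * Command line usage for the parser above, e.g.:
 *
 *	tsx_async_abort=off | full | full,nosmt
 *
 * TAA reuses the MDS VERW machinery (X86_FEATURE_CLEAR_CPU_BUF), which is
 * why "off" only takes full effect on affected parts when mds=off is also
 * given, as the comment in taa_select_mitigation() notes.
 */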
#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	     cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	/*
	 * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
	 * mitigations; disable the KVM-only mitigation in that case.
	 */
	if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
		static_branch_disable(&mmio_stale_data_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If the Processor-MMIO-Stale-Data bug is present and Fill Buffer data
	 * can be propagated to uncore buffers, clearing the Fill buffers on
	 * idle is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
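/*
 * Command line usage for the parser above, e.g.:
 *
 *	mmio_stale_data=off | full | full,nosmt
 *
 * Unlike MDS/TAA, the VMM-only variant is controlled through the
 * mmio_stale_data_clear static key rather than X86_FEATURE_CLEAR_CPU_BUF,
 * so KVM can clear Fill buffers only around guest MMIO access.
 */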
#undef pr_fmt
#define pr_fmt(fmt)	"Register File Data Sampling: " fmt

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF]		= "Vulnerable",
	[RFDS_MITIGATION_VERW]		= "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
};

static void __init rfds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
		rfds_mitigation = RFDS_MITIGATION_OFF;
		return;
	}
	if (rfds_mitigation == RFDS_MITIGATION_OFF)
		return;

	if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
	else
		rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
}

static __init int rfds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return 0;

	if (!strcmp(str, "off"))
		rfds_mitigation = RFDS_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		rfds_mitigation = RFDS_MITIGATION_VERW;

	return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);
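/*
 * Command line usage for the parser above:
 *
 *	reg_file_data_sampling=off | on
 */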
#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
		goto out;

	/*
	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
	 * Stale Data mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	/*
	 * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
	 * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
	 */
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
	if (rfds_mitigation == RFDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_RFDS)) {
		rfds_mitigation = RFDS_MITIGATION_VERW;
		rfds_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
	if (boot_cpu_has_bug(X86_BUG_RFDS))
		pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();

	/*
	 * As these mitigations are inter-related and rely on the VERW
	 * instruction to clear the microarchitectural buffers, update and
	 * print their status after mitigation selection is done for each of
	 * these vulnerabilities.
	 */
	md_clear_update_mitigation();
}
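/*
 * Net effect of md_clear_update_mitigation(): once any of the four VERW
 * users forces X86_FEATURE_CLEAR_CPU_BUF, the others are re-run so their
 * reported state (and the KVM-only MMIO static key) matches the buffer
 * clearing that will actually happen on kernel exit.
 */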
#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * An MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled may not have received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when the CPU is
	 * affected by the Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);
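/*
 * Command line usage for the parser above:
 *
 *	srbds=off
 *
 * Note the inverted sense of the MSR bit handled in update_srbds_msr():
 * setting RNGDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL turns the microcode
 * mitigation *off*, so the OFF/TSX_OFF states set the bit and FULL clears
 * it.
 */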
#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
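/*
 * Command line usage for the parser above:
 *
 *	l1d_flush=on
 *
 * This only arms the infrastructure; an individual task still has to opt
 * in, illustratively via:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
 *	      PR_SPEC_ENABLE, 0, 0);
 *
 * before switch_mm() performs a conditional L1D flush for it.
 */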
#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

#if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
#else
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
#endif

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
		return;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		goto out;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	/* No microcode */
	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation == GDS_MITIGATION_FORCE) {
			/*
			 * This only needs to be done on the boot CPU so do it
			 * here rather than in update_gds_msr()
			 */
			setup_clear_cpu_cap(X86_FEATURE_AVX);
			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
		} else {
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		}
		goto out;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}

	update_gds_msr();
out:
	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);
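/*
 * Command line usage for the parser above:
 *
 *	gather_data_sampling=off | force
 *
 * "force" disables AVX as a stopgap when the GDS_CTRL microcode is absent;
 * with locked microcode (GDS_MITG_LOCKED) the mitigation cannot be turned
 * off, which gds_select_mitigation() reports as "Microcode (locked)".
 */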
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
	RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');

		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_cmd = RETBLEED_CMD_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_STUFF:
		if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else {
			if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			else
				pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");

			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
				 boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */
		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		x86_return_thunk = retbleed_return_thunk;

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		x86_return_thunk = call_depth_return_thunk;
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
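/*
 * Command line usage for the parser above, e.g.:
 *
 *	retbleed=off | auto | unret | ibpb | stuff | nosmt | force
 *
 * Options may be comma-separated ("retbleed=unret,nosmt"); "stuff" is only
 * valid on top of spectre_v2=retpoline with call depth tracking built in.
 */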
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_NONE:
			break;
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
	 * is not required.
	 *
	 * Intel's Enhanced IBRS also protects against cross-thread branch target
	 * injection in user-mode as the IBRS bit remains always set which
	 * implicitly enables cross-thread protections. However, in legacy IBRS
	 * mode, the IBRS bit is set only on kernel entry and cleared on return
	 * to userspace. AMD Automatic IBRS also does not protect userspace.
	 * These modes therefore disable the implicit cross-thread protection,
	 * so allow for STIBP to be selected in those cases.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}
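/*
 * Command line usage for the v2_user_options table consumed above, e.g.:
 *
 *	spectre_v2_user=auto | off | on | prctl | prctl,ibpb | seccomp | seccomp,ibpb
 *
 * The ",ibpb" variants promote IBPB from conditional (switch_mm_cond_ibpb)
 * to unconditional (switch_mm_always_ibpb) while STIBP stays per-task.
 */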
static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};

static const struct {
	const char			*option;
	enum spectre_v2_mitigation_cmd	cmd;
	bool				secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
		return;

	ia32_cap = x86_read_arch_cap_msr();

	if (ia32_cap & ARCH_CAP_RRSBA) {
		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
		update_spec_ctrl(x86_spec_ctrl_base);
	}
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
	/*
	 * Similar to context switches, there are two types of RSB attacks
	 * after VM exit:
	 *
	 * 1) RSB underflow
	 *
	 * 2) Poisoned RSB entry
	 *
	 * When retpoline is enabled, both are mitigated by filling/clearing
	 * the RSB.
	 *
	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
	 * prediction isolation protections, RSB still needs to be cleared
	 * because of #2. Note that SMEP provides no protection here, unlike
	 * user-space-poisoned RSB entries.
	 *
	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
	 * bug is present then a LITE version of RSB protection is required;
	 * just a single call needs to retire before a RET is executed.
	 */
	switch (mode) {
	case SPECTRE_V2_NONE:
		return;

	case SPECTRE_V2_EIBRS_LFENCE:
	case SPECTRE_V2_EIBRS:
		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
		}
		break;

	case SPECTRE_V2_EIBRS_RETPOLINE:
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
		break;

	default:
		pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
		break;
	}
}
/*
 * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by
 * branch history in userspace. Not needed if BHI_NO is set.
 */
static bool __init spec_ctrl_bhi_dis(void)
{
	if (!boot_cpu_has(X86_FEATURE_BHI_CTRL))
		return false;

	x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S;
	update_spec_ctrl(x86_spec_ctrl_base);
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW);

	return true;
}
enum bhi_mitigations {
	BHI_MITIGATION_OFF,
	BHI_MITIGATION_ON,
	BHI_MITIGATION_AUTO,
};

static enum bhi_mitigations bhi_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_SPECTRE_BHI_ON)  ? BHI_MITIGATION_ON  :
	IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
					     BHI_MITIGATION_AUTO;

static int __init spectre_bhi_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		bhi_mitigation = BHI_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		bhi_mitigation = BHI_MITIGATION_ON;
	else if (!strcmp(str, "auto"))
		bhi_mitigation = BHI_MITIGATION_AUTO;
	else
		pr_err("Ignoring unknown spectre_bhi option (%s)", str);

	return 0;
}
early_param("spectre_bhi", spectre_bhi_parse_cmdline);

static void __init bhi_select_mitigation(void)
{
	if (bhi_mitigation == BHI_MITIGATION_OFF)
		return;

	/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
	    !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
		return;

	if (spec_ctrl_bhi_dis())
		return;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	/* Mitigate KVM by default */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
	pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");

	if (bhi_mitigation == BHI_MITIGATION_AUTO)
		return;

	/* Mitigate syscalls when the mitigation is forced =on */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
	pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
}
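/*
 * Command line usage for the parser above:
 *
 *	spectre_bhi=off | on | auto
 *
 * Mitigation preference order in bhi_select_mitigation(): retpoline (when
 * RRSBA behavior is absent), then the hardware BHI_DIS_S control, then the
 * software BHB-clearing sequences (on VM exit always, on syscall only for
 * the forced "on" case).
 */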
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
		    retbleed_cmd != RETBLEED_CMD_OFF &&
		    retbleed_cmd != RETBLEED_CMD_STUFF &&
		    boot_cpu_has(X86_FEATURE_IBRS) &&
		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			mode = SPECTRE_V2_IBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_IBRS:
		mode = SPECTRE_V2_IBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_ibrs_mode(mode)) {
		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	/*
	 * Disable alternate RSB predictions in kernel when indirect CALLs and
	 * JMPs get protection against BHI and Intramode-BTI, but RET
	 * prediction from a non-RSB predictor is still a risk.
	 */
	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	    mode == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	if (boot_cpu_has(X86_BUG_BHI))
		bhi_select_mitigation();

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If Spectre v2 protection has been enabled, fill the RSB during a
	 * context switch. In general there are two types of RSB attacks
	 * across context switches, for which the CALLs/RETs may be unbalanced.
	 *
	 * 1) RSB underflow
	 *
	 *    Some Intel parts have "bottomless RSB". When the RSB is empty,
	 *    speculated return targets may come from the branch predictor,
	 *    which could have a user-poisoned BTB or BHB entry.
	 *
	 *    AMD has it even worse: *all* returns are speculated from the BTB,
	 *    regardless of the state of the RSB.
	 *
	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
	 *    scenario is mitigated by the IBRS branch prediction isolation
	 *    properties, so the RSB buffer filling wouldn't be necessary to
	 *    protect against this type of attack.
	 *
	 *    The "user -> user" attack scenario is mitigated by RSB filling.
	 *
	 * 2) Poisoned RSB entry
	 *
	 *    If the 'next' in-kernel return stack is shorter than 'prev',
	 *    'next' could be tricked into speculating with a user-poisoned RSB
	 *    entry.
	 *
	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
	 *    eIBRS.
	 *
	 *    The "user -> user" scenario, also known as SpectreBHB, requires
	 *    RSB clearing.
	 *
	 * So to mitigate all cases, unconditionally fill RSB on context
	 * switches.
	 *
	 * FIXME: Is this pointless for retbleed-affected AMD?
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
	 * otherwise enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
	    boot_cpu_has(X86_FEATURE_IBPB) &&
	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {

		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
			pr_info("Enabling Speculation Barrier for firmware calls\n");
		}

	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
{
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
	update_spec_ctrl(val);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}
#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
1993 #define pr_fmt(fmt) "Speculative Store Bypass: " fmt
1995 static enum ssb_mitigation ssb_mode __ro_after_init
= SPEC_STORE_BYPASS_NONE
;
1997 /* The kernel command line selection */
1998 enum ssb_mitigation_cmd
{
1999 SPEC_STORE_BYPASS_CMD_NONE
,
2000 SPEC_STORE_BYPASS_CMD_AUTO
,
2001 SPEC_STORE_BYPASS_CMD_ON
,
2002 SPEC_STORE_BYPASS_CMD_PRCTL
,
2003 SPEC_STORE_BYPASS_CMD_SECCOMP
,
static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

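/*
 * Example (illustrative): the option table above maps kernel command line
 * values to commands, e.g. booting with
 *
 *	spec_store_bypass_disable=prctl
 *
 * leaves Speculative Store Bypass enabled by default but lets tasks opt out
 * via prctl(), while "nospec_store_bypass_disable" or "mitigations=off"
 * force SPEC_STORE_BYPASS_CMD_NONE.
 */
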
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}

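/*
 * Example (illustrative userspace sketch, assuming the kernel was booted
 * with "l1d_flush=on" so the switch_mm_cond_l1d_flush key is enabled):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Opt this task in to an L1D flush on context switch out:
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
 *	      PR_SPEC_ENABLE, 0, 0);
 */
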
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

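/*
 * Example (illustrative userspace sketch): with ssb_mode set to PRCTL or
 * SECCOMP, a task can disable Speculative Store Bypass for itself:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Returns 0 on success; fails with EPERM if force-disabled earlier.
 *	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *		  PR_SPEC_DISABLE, 0, 0) != 0)
 *		perror("PR_SET_SPECULATION_CTRL");
 */
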
static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		if (task == current)
			indirect_branch_prediction_barrier();
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

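/*
 * Example (illustrative userspace sketch): in the conditional modes a task
 * can force-disable indirect branch speculation for itself; the force
 * variant cannot be undone by a later PR_SPEC_ENABLE:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *	      PR_SPEC_FORCE_DISABLE, 0, 0);
 */
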
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

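/*
 * Example (illustrative): loading a seccomp filter without the
 * SECCOMP_FILTER_FLAG_SPEC_ALLOW flag ends up in the hook above, i.e.
 *
 *	seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
 *
 * has the same effect as issuing PR_SPEC_FORCE_DISABLE prctls for SSB and
 * indirect branch speculation in the SECCOMP modes. Passing
 * SECCOMP_FILTER_FLAG_SPEC_ALLOW skips this hook.
 */
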
static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_NONE:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	}
	BUG();
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}

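/*
 * Example (illustrative userspace sketch): querying the current state and
 * decoding the returned bit mask:
 *
 *	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			0, 0, 0);
 *
 *	if (ret & PR_SPEC_PRCTL)
 *		;	// mitigation is per-task controllable
 *	if (ret & PR_SPEC_DISABLE)
 *		;	// speculation currently disabled for this task
 */
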
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		update_spec_ctrl(x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);

enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
#endif

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

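/*
 * Example (illustrative): matching kernel command line usage, e.g.
 *
 *	l1tf=flush,nosmt
 *
 * selects the default PTE inversion plus conditional L1D flush and
 * additionally disables SMT, while "l1tf=full,force" also prevents SMT
 * from being re-enabled at runtime.
 */
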
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_UCODE_NEEDED,
	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
};

enum srso_mitigation_cmd {
	SRSO_CMD_OFF,
	SRSO_CMD_MICROCODE,
	SRSO_CMD_SAFE_RET,
	SRSO_CMD_IBPB,
	SRSO_CMD_IBPB_ON_VMEXIT,
};

static const char * const srso_strings[] = {
	[SRSO_MITIGATION_NONE]			= "Vulnerable",
	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;

static int __init srso_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		srso_cmd = SRSO_CMD_OFF;
	else if (!strcmp(str, "microcode"))
		srso_cmd = SRSO_CMD_MICROCODE;
	else if (!strcmp(str, "safe-ret"))
		srso_cmd = SRSO_CMD_SAFE_RET;
	else if (!strcmp(str, "ibpb"))
		srso_cmd = SRSO_CMD_IBPB;
	else if (!strcmp(str, "ibpb-vmexit"))
		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
	else
		pr_err("Ignoring unknown SRSO option (%s).", str);

	return 0;
}
early_param("spec_rstack_overflow", srso_parse_cmdline);

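/*
 * Example (illustrative): matching command line usage, e.g.
 *
 *	spec_rstack_overflow=ibpb-vmexit
 *
 * limits the IBPB to the VM exit path, which is mainly interesting for
 * hosts running untrusted guests that want to avoid the safe-ret overhead
 * on every kernel return.
 */
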
#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."

static void __init srso_select_mitigation(void)
{
	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

	if (cpu_mitigations_off())
		return;

	if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
		if (boot_cpu_has(X86_FEATURE_SBPB))
			x86_pred_cmd = PRED_CMD_SBPB;
		return;
	}

	if (has_microcode) {
		/*
		 * Zen1/2 with SMT off aren't vulnerable after the right
		 * IBPB microcode has been applied.
		 *
		 * Zen1/2 don't have SBPB, no need to try to enable it here.
		 */
		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
			return;
		}

		if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
			srso_mitigation = SRSO_MITIGATION_IBPB;
			goto out;
		}
	} else {
		pr_warn("IBPB-extending microcode not applied!\n");
		pr_warn(SRSO_NOTICE);

		/* may be overwritten by SRSO_CMD_SAFE_RET below */
		srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
	}

	switch (srso_cmd) {
	case SRSO_CMD_OFF:
		if (boot_cpu_has(X86_FEATURE_SBPB))
			x86_pred_cmd = PRED_CMD_SBPB;
		return;

	case SRSO_CMD_MICROCODE:
		if (has_microcode) {
			srso_mitigation = SRSO_MITIGATION_MICROCODE;
			pr_warn(SRSO_NOTICE);
		}
		break;

	case SRSO_CMD_SAFE_RET:
		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
			/*
			 * Enable the return thunk for generated code
			 * like ftrace, static_call, etc.
			 */
			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
			setup_force_cpu_cap(X86_FEATURE_UNRET);

			if (boot_cpu_data.x86 == 0x19) {
				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
				x86_return_thunk = srso_alias_return_thunk;
			} else {
				setup_force_cpu_cap(X86_FEATURE_SRSO);
				x86_return_thunk = srso_return_thunk;
			}
			if (has_microcode)
				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
			else
				srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
		}
		break;

	case SRSO_CMD_IBPB:
		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			if (has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
				srso_mitigation = SRSO_MITIGATION_IBPB;
			}
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
		}
		break;

	case SRSO_CMD_IBPB_ON_VMEXIT:
		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
			}
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
		}
		break;
	}

out:
	pr_info("%s\n", srso_strings[srso_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt)	fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
				  l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
			  l1tf_vmx_states[l1tf_vmx_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sysfs_emit(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sysfs_emit(buf, "Processor vulnerable\n");
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
				   sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  taa_strings[taa_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return sysfs_emit(buf, "Unknown: No mitigations\n");

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t rfds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
}

static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return "; STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return "; STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return "; STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return "; STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return "; IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return "; IBPB: conditional";
		return "; IBPB: disabled";
	}
	return "";
}

static char *pbrsb_eibrs_state(void)
{
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return "; PBRSB-eIBRS: SW sequence";
		else
			return "; PBRSB-eIBRS: Vulnerable";
	} else {
		return "; PBRSB-eIBRS: Not affected";
	}
}

static const char *spectre_bhi_state(void)
{
	if (!boot_cpu_has_bug(X86_BUG_BHI))
		return "; BHI: Not affected";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
		return "; BHI: BHI_DIS_S";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
		return "; BHI: SW loop, KVM: SW loop";
	else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
		 !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
		return "; BHI: Retpoline";
	else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
		return "; BHI: Syscall hardening, KVM: SW loop";

	return "; BHI: Vulnerable (Syscall hardening enabled)";
}

static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sysfs_emit(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_bhi_state(),
			  /* this should always be at the end */
			  spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}

static ssize_t srso_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
		return sysfs_emit(buf, "Mitigation: SMT disabled\n");

	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}

static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_MMIO_UNKNOWN:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);

	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}

ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}

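/*
 * Example (illustrative): the functions above back the files under
 * /sys/devices/system/cpu/vulnerabilities/, so the reported state can be
 * read with plain file I/O, e.g.:
 *
 *	char buf[128];
 *	int fd = open("/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
 *		      O_RDONLY);
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *		;	// e.g. "Mitigation: Speculative Store Bypass disabled via prctl"
 */
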
#endif

void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}