// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/paravirt.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}
/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * Previously running kernel (kexec), may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();

	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();

	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET or IBPB.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * srso_select_mitigation() depends and must run after
	 * retbleed_select_mitigation().
	 */
	srso_select_mitigation();
	gds_select_mitigation();
}
/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}
static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
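
/*
 * Illustrative usage (not from the original file): the strings parsed above
 * correspond directly to kernel command line settings such as "mds=off",
 * "mds=full" or "mds=full,nosmt", the latter additionally disabling SMT on
 * CPUs affected by more than MSBDS.
 */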
#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};
static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}
static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};
static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	     cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}
static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;

	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}
#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled and it hasn't received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}
static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
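
/*
 * Illustrative sketch (not from the original file): once booted with
 * "l1d_flush=on" on a CPU with FLUSH_L1D, the flush itself is opt-in per
 * task through the speculation prctl handled by l1d_flush_prctl_set()
 * further down in this file, roughly:
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH, PR_SPEC_ENABLE, 0, 0);
 */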
#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION)
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
#else
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
#endif

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
		return;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}
static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		goto out;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	/* No microcode */
	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation == GDS_MITIGATION_FORCE) {
			/*
			 * This only needs to be done on the boot CPU so do it
			 * here rather than in update_gds_msr()
			 */
			setup_clear_cpu_cap(X86_FEATURE_AVX);
			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
		} else {
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		}
		goto out;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}

	update_gds_msr();
out:
	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);
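
/*
 * Illustrative usage (not from the original file): the handler above maps to
 * booting with "gather_data_sampling=off" or "gather_data_sampling=force";
 * the force variant trades AVX for protection when no fixed microcode is
 * available, as implemented in gds_select_mitigation() above.
 */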
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};
/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}
static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);
enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
	RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;
static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');

		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_cmd = RETBLEED_CMD_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
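
/*
 * Illustrative usage (not from the original file): retbleed= takes a comma
 * separated list as parsed above, e.g. "retbleed=auto,nosmt" selects the
 * automatic mitigation and additionally disables SMT when STIBP is not
 * available, as handled in retbleed_select_mitigation() below.
 */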
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_STUFF:
		if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING) &&
		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else {
			if (IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING))
				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			else
				pr_err("WARNING: kernel not compiled with CALL_DEPTH_TRACKING.\n");

			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
	default:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */
		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		if (IS_ENABLED(CONFIG_RETHUNK))
			x86_return_thunk = retbleed_return_thunk;

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
		x86_set_skl_return_thunk();
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;
#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif
#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */
	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};
static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
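
/*
 * Illustrative usage (not from the original file): the table above is matched
 * against the "spectre_v2_user=" command line option, e.g.
 * "spectre_v2_user=prctl,ibpb" selects conditional STIBP via prctl combined
 * with an always-on IBPB, as wired up in spectre_v2_user_select_mitigation().
 */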
static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}
static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
	 * is not required.
	 *
	 * Intel's Enhanced IBRS also protects against cross-thread branch target
	 * injection in user-mode as the IBRS bit remains always set which
	 * implicitly enables cross-thread protections. However, in legacy IBRS
	 * mode, the IBRS bit is set only on kernel entry and cleared on return
	 * to userspace. AMD Automatic IBRS also does not protect userspace.
	 * These modes therefore disable the implicit cross-thread protection,
	 * so allow for STIBP to be selected in those cases.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");

		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};
static const struct {
	const char			*option;
	enum spectre_v2_mitigation_cmd	cmd;
	bool				secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};
static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_CPU_IBRS_ENTRY)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}
static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
		return;

	ia32_cap = x86_read_arch_cap_msr();

	if (ia32_cap & ARCH_CAP_RRSBA) {
		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
		update_spec_ctrl(x86_spec_ctrl_base);
	}
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
	/*
	 * Similar to context switches, there are two types of RSB attacks
	 * after VM exit:
	 *
	 * 1) RSB underflow
	 *
	 * 2) Poisoned RSB entry
	 *
	 * When retpoline is enabled, both are mitigated by filling/clearing
	 * the RSB.
	 *
	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
	 * prediction isolation protections, RSB still needs to be cleared
	 * because of #2. Note that SMEP provides no protection here, unlike
	 * user-space-poisoned RSB entries.
	 *
	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
	 * bug is present then a LITE version of RSB protection is required,
	 * just a single call needs to retire before a RET is executed.
	 */
	switch (mode) {
	case SPECTRE_V2_NONE:
		return;

	case SPECTRE_V2_EIBRS_LFENCE:
	case SPECTRE_V2_EIBRS:
		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
		}
		return;

	case SPECTRE_V2_EIBRS_RETPOLINE:
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
		return;
	}

	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
}
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		if (IS_ENABLED(CONFIG_CPU_IBRS_ENTRY) &&
		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
		    retbleed_cmd != RETBLEED_CMD_OFF &&
		    retbleed_cmd != RETBLEED_CMD_STUFF &&
		    boot_cpu_has(X86_FEATURE_IBRS) &&
		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			mode = SPECTRE_V2_IBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_IBRS:
		mode = SPECTRE_V2_IBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_ibrs_mode(mode)) {
		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	/*
	 * Disable alternate RSB predictions in kernel when indirect CALLs and
	 * JMPs gets protection against BHI and Intramode-BTI, but RET
	 * prediction from a non-RSB predictor is still a risk.
	 */
	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	    mode == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If Spectre v2 protection has been enabled, fill the RSB during a
	 * context switch. In general there are two types of RSB attacks
	 * across context switches, for which the CALLs/RETs may be unbalanced.
	 *
	 * 1) RSB underflow
	 *
	 *    Some Intel parts have "bottomless RSB". When the RSB is empty,
	 *    speculated return targets may come from the branch predictor,
	 *    which could have a user-poisoned BTB or BHB entry.
	 *
	 *    AMD has it even worse: *all* returns are speculated from the BTB,
	 *    regardless of the state of the RSB.
	 *
	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
	 *    scenario is mitigated by the IBRS branch prediction isolation
	 *    properties, so the RSB buffer filling wouldn't be necessary to
	 *    protect against this type of attack.
	 *
	 *    The "user -> user" attack scenario is mitigated by RSB filling.
	 *
	 * 2) Poisoned RSB entry
	 *
	 *    If the 'next' in-kernel return stack is shorter than 'prev',
	 *    'next' could be tricked into speculating with a user-poisoned RSB
	 *    entry.
	 *
	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
	 *    eIBRS.
	 *
	 *    The "user -> user" scenario, also known as SpectreBHB, requires
	 *    RSB clearing.
	 *
	 * So to mitigate all cases, unconditionally fill RSB on context
	 * switches.
	 *
	 * FIXME: Is this pointless for retbleed-affected AMD?
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
	 * otherwise enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
	    boot_cpu_has(X86_FEATURE_IBPB) &&
	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {

		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
			pr_info("Enabling Speculation Barrier for firmware calls\n");
		}

	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
{
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
	update_spec_ctrl(val);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}
#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
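
/*
 * Illustrative usage (not from the original file): the table above is matched
 * against "spec_store_bypass_disable=", e.g. "spec_store_bypass_disable=prctl"
 * leaves SSB enabled by default and lets individual tasks opt out through the
 * speculation prctl handled further down in this file.
 */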
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}
static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		if (task == current)
			indirect_branch_prediction_barrier();
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
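
/*
 * Example (rough userspace sketch, not part of this file): the handlers above
 * are reached through the PR_SET_SPECULATION_CTRL prctl.  A task disabling
 * Speculative Store Bypass for itself would do something like:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *		  PR_SPEC_DISABLE, 0, 0))
 *		perror("PR_SET_SPECULATION_CTRL");
 *
 * An -ENXIO return from ssb_prctl_set() (boot-time mode is neither prctl nor
 * seccomp) shows up in userspace as errno == ENXIO.
 */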
#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}
static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}
static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}
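
/*
 * Example (rough userspace sketch): the PR_GET_SPECULATION_CTRL prctl returns
 * the PR_SPEC_* state bits directly in the syscall return value:
 *
 *	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 *	if (ret & PR_SPEC_PRCTL)	// per-task control is available
 *		...
 *	if (ret & PR_SPEC_DISABLE)	// speculation currently disabled
 *		...
 *
 * A plain PR_SPEC_NOT_AFFECTED / PR_SPEC_ENABLE (without PR_SPEC_PRCTL) means
 * the state cannot be changed via prctl, matching the *_prctl_get() helpers
 * above.
 */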
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		update_spec_ctrl(x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
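
/*
 * Worked example for the MAX_PA/2 check above (numbers only; this assumes
 * l1tf_pfn_limit() works out to roughly 2^(x86_cache_bits - 1 - PAGE_SHIFT)
 * pages): with the 44 cache bits forced by override_cache_bits(), half_pa is
 * about 2^43 bytes = 8 TiB, far above any RAM these CPUs can hold.  Without
 * the override, a part reporting only 36 physical address bits would put the
 * cut-off at 2^35 bytes = 32 GiB, which is exactly the false positive
 * described in the comment above override_cache_bits().
 */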
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
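
/*
 * The strings matched above correspond one-to-one to the "l1tf=" boot
 * parameter, e.g. booting with
 *
 *	l1tf=flush,nosmt
 *
 * selects L1TF_MITIGATION_FLUSH_NOSMT, which additionally makes
 * l1tf_select_mitigation() call cpu_smt_disable(false).  See the admin-guide
 * L1TF document referenced in the pr_info() above for the full semantics of
 * each value.
 */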
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
};

enum srso_mitigation_cmd {
	SRSO_CMD_OFF,
	SRSO_CMD_MICROCODE,
	SRSO_CMD_SAFE_RET,
	SRSO_CMD_IBPB,
	SRSO_CMD_IBPB_ON_VMEXIT,
};

static const char * const srso_strings[] = {
	[SRSO_MITIGATION_NONE]		 = "Vulnerable",
	[SRSO_MITIGATION_MICROCODE]	 = "Mitigation: microcode",
	[SRSO_MITIGATION_SAFE_RET]	 = "Mitigation: safe RET",
	[SRSO_MITIGATION_IBPB]		 = "Mitigation: IBPB",
	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
static int __init srso_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		srso_cmd = SRSO_CMD_OFF;
	else if (!strcmp(str, "microcode"))
		srso_cmd = SRSO_CMD_MICROCODE;
	else if (!strcmp(str, "safe-ret"))
		srso_cmd = SRSO_CMD_SAFE_RET;
	else if (!strcmp(str, "ibpb"))
		srso_cmd = SRSO_CMD_IBPB;
	else if (!strcmp(str, "ibpb-vmexit"))
		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
	else
		pr_err("Ignoring unknown SRSO option (%s).", str);

	return 0;
}
early_param("spec_rstack_overflow", srso_parse_cmdline);
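
/*
 * As with "l1tf=", the values accepted above map directly to the
 * "spec_rstack_overflow=" boot parameter, e.g.
 *
 *	spec_rstack_overflow=ibpb-vmexit
 *
 * selects SRSO_CMD_IBPB_ON_VMEXIT, which srso_select_mitigation() below only
 * honours when the kernel was built with CONFIG_CPU_SRSO and the
 * IBPB-extending microcode is present.
 */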
#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."

static void __init srso_select_mitigation(void)
{
	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
		goto pred_cmd;

	if (!has_microcode) {
		pr_warn("IBPB-extending microcode not applied!\n");
		pr_warn(SRSO_NOTICE);
	} else {
		/*
		 * Zen1/2 with SMT off aren't vulnerable after the right
		 * IBPB microcode has been applied.
		 */
		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
			return;
		}
	}

	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (has_microcode) {
			pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
			srso_mitigation = SRSO_MITIGATION_IBPB;
			goto pred_cmd;
		}
	}

	switch (srso_cmd) {
	case SRSO_CMD_OFF:
		goto pred_cmd;

	case SRSO_CMD_MICROCODE:
		if (has_microcode) {
			srso_mitigation = SRSO_MITIGATION_MICROCODE;
			pr_warn(SRSO_NOTICE);
		}
		break;

	case SRSO_CMD_SAFE_RET:
		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
			/*
			 * Enable the return thunk for generated code
			 * like ftrace, static_call, etc.
			 */
			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
			setup_force_cpu_cap(X86_FEATURE_UNRET);

			if (boot_cpu_data.x86 == 0x19) {
				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
				x86_return_thunk = srso_alias_return_thunk;
			} else {
				setup_force_cpu_cap(X86_FEATURE_SRSO);
				x86_return_thunk = srso_return_thunk;
			}
			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
			goto pred_cmd;
		}
		break;

	case SRSO_CMD_IBPB:
		if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
			if (has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
				srso_mitigation = SRSO_MITIGATION_IBPB;
			}
		} else {
			pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
			goto pred_cmd;
		}
		break;

	case SRSO_CMD_IBPB_ON_VMEXIT:
		if (IS_ENABLED(CONFIG_CPU_SRSO)) {
			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
			}
		} else {
			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
			goto pred_cmd;
		}
		break;

	default:
		break;
	}

	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));

pred_cmd:
	if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
	     boot_cpu_has(X86_FEATURE_SBPB))
		x86_pred_cmd = PRED_CMD_SBPB;
}
#undef pr_fmt
#define pr_fmt(fmt)	fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};
static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
				  l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
			  l1tf_vmx_states[l1tf_vmx_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sysfs_emit(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sysfs_emit(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
				   sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  taa_strings[taa_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return sysfs_emit(buf, "Unknown: No mitigations\n");

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}
static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}
static char *pbrsb_eibrs_state(void)
{
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return ", PBRSB-eIBRS: SW sequence";
		else
			return ", PBRSB-eIBRS: Vulnerable";
	} else {
		return ", PBRSB-eIBRS: Not affected";
	}
}
static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sysfs_emit(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_v2_module_string());
}
static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}
static ssize_t srso_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
		return sysfs_emit(buf, "Mitigation: SMT disabled\n");

	return sysfs_emit(buf, "%s%s\n",
			  srso_strings[srso_mitigation],
			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}
static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_MMIO_UNKNOWN:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
	else
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}
#endif