// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS this MSR is written on return-to-user, unless
	 * forced the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrl(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);
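/*
 * Editor's illustrative sketch (not part of this file): the scheduler-side
 * composition that makes x86_spec_ctrl_current differ from the base value
 * looks roughly like
 *
 *	u64 msr = x86_spec_ctrl_base;
 *	msr |= ssbd_tif_to_spec_ctrl(ti->flags);
 *	msr |= stibp_tif_to_spec_ctrl(ti->flags);
 *	update_spec_ctrl_cond(msr);
 *
 * where the per-task TIF_SSBD/TIF_SPEC_IB bits are translated into
 * SPEC_CTRL_SSBD/SPEC_CTRL_STIBP. See __speculation_ctrl_update() in
 * process.c for the authoritative version.
 */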
/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/*
 * Controls whether l1d flush based mitigations are enabled,
 * based on hw features and admin setting via boot parameter
 * defaulting to disabled.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * A previously running kernel (kexec) may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	/*
	 * retbleed_select_mitigation() relies on the state set by
	 * spectre_v2_select_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_select_mitigation();
	/*
	 * spectre_v2_user_select_mitigation() relies on the state set by
	 * retbleed_select_mitigation(); specifically the STIBP selection is
	 * forced for UNRET or IBPB.
	 */
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();

	/*
	 * srso_select_mitigation() depends and must run after
	 * retbleed_select_mitigation().
	 */
	srso_select_mitigation();
	gds_select_mitigation();
}
/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);
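/*
 * Usage note (editor's addition): the strings matched above correspond to
 * the "mds=" boot parameter, e.g.
 *
 *	mds=off | mds=full | mds=full,nosmt
 *
 * "full,nosmt" additionally disables SMT unless the CPU is affected only
 * by MSBDS, mirroring the checks in mds_select_mitigation().
 */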
#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	     cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
		goto out;

	/*
	 * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
	 * Stale Data mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}
#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * A MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
	 * being disabled and it hasn't received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);
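/*
 * Usage note (editor's addition): "l1d_flush=on" only arms the
 * switch_mm_cond_l1d_flush static key; a task still has to opt in, e.g.
 * from userspace:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
 *	      PR_SPEC_ENABLE, 0, 0);
 *
 * which sets TIF_SPEC_L1D_FLUSH via l1d_flush_prctl_set() further down.
 */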
#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

#if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
#else
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
#endif

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
		return;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		goto out;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	/* No microcode */
	if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation == GDS_MITIGATION_FORCE) {
			/*
			 * This only needs to be done on the boot CPU so do it
			 * here rather than in update_gds_msr()
			 */
			setup_clear_cpu_cap(X86_FEATURE_AVX);
			pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
		} else {
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		}
		goto out;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}

	update_gds_msr();
out:
	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

enum retbleed_mitigation_cmd {
	RETBLEED_CMD_OFF,
	RETBLEED_CMD_AUTO,
	RETBLEED_CMD_UNRET,
	RETBLEED_CMD_IBPB,
	RETBLEED_CMD_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
	RETBLEED_CMD_AUTO;

static int __ro_after_init retbleed_nosmt = false;
static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	while (str) {
		char *next = strchr(str, ',');

		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_cmd = RETBLEED_CMD_OFF;
		} else if (!strcmp(str, "auto")) {
			retbleed_cmd = RETBLEED_CMD_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_cmd = RETBLEED_CMD_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_cmd = RETBLEED_CMD_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_cmd = RETBLEED_CMD_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);
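/*
 * Usage note (editor's addition): options are comma separated and applied
 * in order, so e.g.
 *
 *	retbleed=ibpb,nosmt
 *
 * selects the IBPB mitigation and, when STIBP is unavailable, additionally
 * disables SMT (see the mitigate_smt handling below).
 */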
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
static void __init retbleed_select_mitigation(void)
{
	bool mitigate_smt = false;

	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	switch (retbleed_cmd) {
	case RETBLEED_CMD_OFF:
		return;

	case RETBLEED_CMD_UNRET:
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			goto do_cmd_auto;
		} else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			goto do_cmd_auto;
		}
		break;

	case RETBLEED_CMD_STUFF:
		if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
		    spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else {
			if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
				pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			else
				pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");

			goto do_cmd_auto;
		}
		break;

do_cmd_auto:
	case RETBLEED_CMD_AUTO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
		    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
			if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
				retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
			else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
				 boot_cpu_has(X86_FEATURE_IBPB))
				retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		}

		/*
		 * The Intel mitigation (IBRS or eIBRS) was already selected in
		 * spectre_v2_select_mitigation(). 'retbleed_mitigation' will
		 * be set accordingly below.
		 */

		break;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		x86_return_thunk = retbleed_return_thunk;

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		x86_return_thunk = call_depth_return_thunk;
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);

	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
	}

	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_MITIGATION_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */
	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
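/*
 * Editor's note: the arglen comparison makes the match exact, so a cmdline
 * value of "prctl,ibpb" cannot accidentally match the shorter option
 * "prctl"; cmdline_find_option() returns the argument length that is then
 * passed in here as arglen.
 */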
/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
	SPECTRE_V2_CMD_IBRS,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
	char arg[20];
	int ret, i;

	switch (spectre_v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
	return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
}
static void __init
spectre_v2_user_select_mitigation(void)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline();
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_NONE:
			break;
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
	 * is not required.
	 *
	 * Intel's Enhanced IBRS also protects against cross-thread branch target
	 * injection in user-mode as the IBRS bit remains always set which
	 * implicitly enables cross-thread protections. However, in legacy IBRS
	 * mode, the IBRS bit is set only on kernel entry and cleared on return
	 * to userspace. AMD Automatic IBRS also does not protect userspace.
	 * These modes therefore disable the implicit cross-thread protection,
	 * so allow for STIBP to be selected in those cases.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (mode != SPECTRE_V2_USER_STRICT &&
		    mode != SPECTRE_V2_USER_STRICT_PREFERRED)
			pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;
	}

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}
static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced / Automatic IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced / Automatic IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced / Automatic IBRS + Retpolines",
	[SPECTRE_V2_IBRS]			= "Mitigation: IBRS",
};

static const struct {
	const char			*option;
	enum spectre_v2_mitigation_cmd	cmd;
	bool				secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
	{ "ibrs",		SPECTRE_V2_CMD_IBRS,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
		pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
		pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
		return;

	ia32_cap = x86_read_arch_cap_msr();

	if (ia32_cap & ARCH_CAP_RRSBA) {
		x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
		update_spec_ctrl(x86_spec_ctrl_base);
	}
}

static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
{
	/*
	 * Similar to context switches, there are two types of RSB attacks
	 * after VM exit:
	 *
	 * 1) RSB underflow
	 *
	 * 2) Poisoned RSB entry
	 *
	 * When retpoline is enabled, both are mitigated by filling/clearing
	 * the RSB.
	 *
	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
	 * prediction isolation protections, RSB still needs to be cleared
	 * because of #2. Note that SMEP provides no protection here, unlike
	 * user-space-poisoned RSB entries.
	 *
	 * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
	 * bug is present then a LITE version of RSB protection is required,
	 * just a single call needs to retire before a RET is executed.
	 */
	switch (mode) {
	case SPECTRE_V2_NONE:
		return;

	case SPECTRE_V2_EIBRS_LFENCE:
	case SPECTRE_V2_EIBRS:
		if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
			setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
			pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
		}
		return;

	case SPECTRE_V2_EIBRS_RETPOLINE:
	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
		pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
		return;
	}

	pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
	dump_stack();
}
static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
		    boot_cpu_has_bug(X86_BUG_RETBLEED) &&
		    retbleed_cmd != RETBLEED_CMD_OFF &&
		    retbleed_cmd != RETBLEED_CMD_STUFF &&
		    boot_cpu_has(X86_FEATURE_IBRS) &&
		    boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
			mode = SPECTRE_V2_IBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_IBRS:
		mode = SPECTRE_V2_IBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_ibrs_mode(mode)) {
		if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
			msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_IBRS:
		setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
			pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	/*
	 * Disable alternate RSB predictions in kernel when indirect CALLs and
	 * JMPs get protection against BHI and Intramode-BTI, but RET
	 * prediction from a non-RSB predictor is still a risk.
	 */
	if (mode == SPECTRE_V2_EIBRS_LFENCE ||
	    mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	    mode == SPECTRE_V2_RETPOLINE)
		spec_ctrl_disable_kernel_rrsba();

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If Spectre v2 protection has been enabled, fill the RSB during a
	 * context switch. In general there are two types of RSB attacks
	 * across context switches, for which the CALLs/RETs may be unbalanced.
	 *
	 * 1) RSB underflow
	 *
	 *    Some Intel parts have "bottomless RSB". When the RSB is empty,
	 *    speculated return targets may come from the branch predictor,
	 *    which could have a user-poisoned BTB or BHB entry.
	 *
	 *    AMD has it even worse: *all* returns are speculated from the BTB,
	 *    regardless of the state of the RSB.
	 *
	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
	 *    scenario is mitigated by the IBRS branch prediction isolation
	 *    properties, so the RSB buffer filling wouldn't be necessary to
	 *    protect against this type of attack.
	 *
	 *    The "user -> user" attack scenario is mitigated by RSB filling.
	 *
	 * 2) Poisoned RSB entry
	 *
	 *    If the 'next' in-kernel return stack is shorter than 'prev',
	 *    'next' could be tricked into speculating with a user-poisoned RSB
	 *    entry.
	 *
	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
	 *    eIBRS.
	 *
	 *    The "user -> user" scenario, also known as SpectreBHB, requires
	 *    RSB clearing.
	 *
	 * So to mitigate all cases, unconditionally fill RSB on context
	 * switches.
	 *
	 * FIXME: Is this pointless for retbleed-affected AMD?
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	spectre_v2_determine_rsb_fill_type_at_vmexit(mode);

	/*
	 * Retpoline protects the kernel, but doesn't protect firmware. IBRS
	 * and Enhanced IBRS protect firmware too, so enable IBRS around
	 * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
	 * otherwise enabled.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
	    boot_cpu_has(X86_FEATURE_IBPB) &&
	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {

		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
			pr_info("Enabling Speculation Barrier for firmware calls\n");
		}

	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_cmd = cmd;
}
static void update_stibp_msr(void * __unused)
{
	u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
	update_spec_ctrl(val);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}
#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			update_spec_ctrl(x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}
1980 #define pr_fmt(fmt) "Speculation prctl: " fmt
1982 static void task_update_spec_tif(struct task_struct
*tsk
)
1984 /* Force the update of the real TIF bits */
1985 set_tsk_thread_flag(tsk
, TIF_SPEC_FORCE_UPDATE
);
1988 * Immediately update the speculation control MSRs for the current
1989 * task, but for a non-current task delay setting the CPU
1990 * mitigation until it is scheduled next.
1992 * This can only happen for SECCOMP mitigation. For PRCTL it's
1993 * always the current task.
1996 speculation_ctrl_update_current();
static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return -EPERM;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	case PR_SPEC_DISABLE:
		clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
		return 0;
	default:
		return -ERANGE;
	}
}
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_clear_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_noexec(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
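
/*
 * Illustrative userspace sketch (not part of this file): the prctl(2) call
 * that reaches ssb_prctl_set() above. PR_SPEC_FORCE_DISABLE works the same
 * way but, per the PR_SPEC_ENABLE case above, can never be undone:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			  PR_SPEC_DISABLE, 0, 0))
 *			perror("PR_SET_SPECULATION_CTRL");
 *		return 0;
 *	}
 */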
static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		if (task == current)
			indirect_branch_prediction_barrier();
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
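
/*
 * Illustrative userspace sketch: the equivalent call for indirect branch
 * speculation, dispatched to ib_prctl_set() above. Note the barrier issued
 * there when the calling task disables speculation for itself:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *	      PR_SPEC_DISABLE, 0, 0);
 */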
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
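
/*
 * Note: seccomp invokes arch_seccomp_spec_mitigate() when a filter is
 * installed, unless userspace opted out. Illustrative sketch (assumes a
 * prepared struct sock_fprog 'prog' and the usual seccomp/syscall headers);
 * the SECCOMP_FILTER_FLAG_SPEC_ALLOW flag skips the force-disable above:
 *
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog);
 */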
static int l1d_flush_prctl_get(struct task_struct *task)
{
	if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
		return PR_SPEC_FORCE_DISABLE;

	if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	else
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
}
static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_NONE:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_noexec(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	}

	BUG();
}
static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	case PR_SPEC_L1D_FLUSH:
		return l1d_flush_prctl_get(task);
	default:
		return -ENODEV;
	}
}
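
/*
 * Illustrative userspace sketch: querying per-task state through
 * arch_prctl_spec_ctrl_get() above. The return value combines the
 * PR_SPEC_PRCTL control bit with a state bit:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int v = prctl(PR_GET_SPECULATION_CTRL,
 *			      PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 *		if (v < 0)
 *			perror("PR_GET_SPECULATION_CTRL");
 *		else
 *			printf("SSB state: %#x%s\n", v,
 *			       (v & PR_SPEC_PRCTL) ? " (prctl controlled)" : "");
 *		return 0;
 *	}
 */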
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		update_spec_ctrl(x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}
bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
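
/*
 * Worked example of the override above: l1tf_pfn_limit() is
 * BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT), so the MAX_PA/2 cutoff
 * computed in l1tf_select_mitigation() is half_pa = 2^(x86_cache_bits - 1).
 * With 36 reported bits that cutoff is 2^35 = 32GB, which a machine with
 * 32G RAM plus memory holes already exceeds; raised to 44 bits it becomes
 * 2^43 = 8TB and the false positive disappears.
 */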
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
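
/*
 * Example: booting with "l1tf=flush,nosmt" selects the default flush
 * mitigation and additionally disables SMT, while "l1tf=full,force"
 * enables unconditional flushing and forbids re-enabling SMT at runtime
 * (the cpu_smt_disable(true) path above).
 */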
#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Return Stack Overflow: " fmt

enum srso_mitigation {
	SRSO_MITIGATION_NONE,
	SRSO_MITIGATION_UCODE_NEEDED,
	SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
	SRSO_MITIGATION_MICROCODE,
	SRSO_MITIGATION_SAFE_RET,
	SRSO_MITIGATION_IBPB,
	SRSO_MITIGATION_IBPB_ON_VMEXIT,
};

enum srso_mitigation_cmd {
	SRSO_CMD_OFF,
	SRSO_CMD_MICROCODE,
	SRSO_CMD_SAFE_RET,
	SRSO_CMD_IBPB,
	SRSO_CMD_IBPB_ON_VMEXIT,
};

static const char * const srso_strings[] = {
	[SRSO_MITIGATION_NONE]			= "Vulnerable",
	[SRSO_MITIGATION_UCODE_NEEDED]		= "Vulnerable: No microcode",
	[SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED]	= "Vulnerable: Safe RET, no microcode",
	[SRSO_MITIGATION_MICROCODE]		= "Vulnerable: Microcode, no safe RET",
	[SRSO_MITIGATION_SAFE_RET]		= "Mitigation: Safe RET",
	[SRSO_MITIGATION_IBPB]			= "Mitigation: IBPB",
	[SRSO_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT only"
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;

static int __init srso_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		srso_cmd = SRSO_CMD_OFF;
	else if (!strcmp(str, "microcode"))
		srso_cmd = SRSO_CMD_MICROCODE;
	else if (!strcmp(str, "safe-ret"))
		srso_cmd = SRSO_CMD_SAFE_RET;
	else if (!strcmp(str, "ibpb"))
		srso_cmd = SRSO_CMD_IBPB;
	else if (!strcmp(str, "ibpb-vmexit"))
		srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
	else
		pr_err("Ignoring unknown SRSO option (%s).", str);

	return 0;
}
early_param("spec_rstack_overflow", srso_parse_cmdline);
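
/*
 * Example: "spec_rstack_overflow=safe-ret" on the kernel command line makes
 * the default safe-RET choice explicit; "spec_rstack_overflow=ibpb" trades
 * it for the heavier IBPB-on-entry mitigation handled below.
 */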
#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."

static void __init srso_select_mitigation(void)
{
	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

	if (cpu_mitigations_off())
		return;

	if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
		if (boot_cpu_has(X86_FEATURE_SBPB))
			x86_pred_cmd = PRED_CMD_SBPB;
		return;
	}

	if (has_microcode) {
		/*
		 * Zen1/2 with SMT off aren't vulnerable after the right
		 * IBPB microcode has been applied.
		 *
		 * Zen1/2 don't have SBPB, no need to try to enable it here.
		 */
		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
			return;
		}

		if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
			srso_mitigation = SRSO_MITIGATION_IBPB;
			goto out;
		}
	} else {
		pr_warn("IBPB-extending microcode not applied!\n");
		pr_warn(SRSO_NOTICE);

		/* may be overwritten by SRSO_CMD_SAFE_RET below */
		srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
	}

	switch (srso_cmd) {
	case SRSO_CMD_OFF:
		if (boot_cpu_has(X86_FEATURE_SBPB))
			x86_pred_cmd = PRED_CMD_SBPB;
		return;

	case SRSO_CMD_MICROCODE:
		if (has_microcode) {
			srso_mitigation = SRSO_MITIGATION_MICROCODE;
			pr_warn(SRSO_NOTICE);
		}
		break;

	case SRSO_CMD_SAFE_RET:
		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
			/*
			 * Enable the return thunk for generated code
			 * like ftrace, static_call, etc.
			 */
			setup_force_cpu_cap(X86_FEATURE_RETHUNK);
			setup_force_cpu_cap(X86_FEATURE_UNRET);

			if (boot_cpu_data.x86 == 0x19) {
				setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
				x86_return_thunk = srso_alias_return_thunk;
			} else {
				setup_force_cpu_cap(X86_FEATURE_SRSO);
				x86_return_thunk = srso_return_thunk;
			}
			if (has_microcode)
				srso_mitigation = SRSO_MITIGATION_SAFE_RET;
			else
				srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
		}
		break;

	case SRSO_CMD_IBPB:
		if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			if (has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
				srso_mitigation = SRSO_MITIGATION_IBPB;
			}
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
		}
		break;

	case SRSO_CMD_IBPB_ON_VMEXIT:
		if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
			if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
				setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
				srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
			}
		} else {
			pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
		}
		break;
	}

out:
	pr_info("%s\n", srso_strings[srso_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt)	fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
				  l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
			  l1tf_vmx_states[l1tf_vmx_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t itlb_multihit_show_state(char *buf)
{
	if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
	    !boot_cpu_has(X86_FEATURE_VMX))
		return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
	else if (!(cr4_read_shadow() & X86_CR4_VMXE))
		return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
	else if (itlb_multihit_kvm_mitigation)
		return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sysfs_emit(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sysfs_emit(buf, "Processor vulnerable\n");
}
#endif
static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
				  (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
				   sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  taa_strings[taa_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return sysfs_emit(buf, "Unknown: No mitigations\n");

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    !boot_cpu_has(X86_FEATURE_AUTOIBRS))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}
static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}
static char *pbrsb_eibrs_state(void)
{
	if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
		if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
		    boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
			return ", PBRSB-eIBRS: SW sequence";
		else
			return ", PBRSB-eIBRS: Vulnerable";
	} else {
		return ", PBRSB-eIBRS: Not affected";
	}
}
static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sysfs_emit(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
			  spectre_v2_strings[spectre_v2_enabled],
			  ibpb_state(),
			  boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			  stibp_state(),
			  boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			  pbrsb_eibrs_state(),
			  spectre_v2_module_string());
}
static ssize_t srbds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
}
static ssize_t retbleed_show_state(char *buf)
{
	if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
	    retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");

		return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
				  !sched_smt_active() ? "disabled" :
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
				  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
				  "enabled with STIBP protection" : "vulnerable");
	}

	return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
}
static ssize_t srso_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
		return sysfs_emit(buf, "Mitigation: SMT disabled\n");

	return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
}
static ssize_t gds_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sysfs_emit(buf, "Mitigation: PTI\n");

		if (hypervisor_is_type(X86_HYPER_XEN_PV))
			return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_MMIO_UNKNOWN:
		return mmio_stale_data_show_state(buf);

	case X86_BUG_RETBLEED:
		return retbleed_show_state(buf);

	case X86_BUG_SRSO:
		return srso_show_state(buf);

	case X86_BUG_GDS:
		return gds_show_state(buf);

	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");
}
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);

	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}

ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}

ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}
#endif

void __warn_thunk(void)
{
	WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
}