x86/bugs: Add a Transient Scheduler Attacks mitigation
author     Borislav Petkov (AMD) <bp@alien8.de>
           Wed, 11 Sep 2024 08:53:08 +0000 (10:53 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 10 Jul 2025 13:59:54 +0000 (15:59 +0200)
commit d8010d4ba43e9f790925375a7de100604a5e2dba upstream.

Add the required feature detection glue to bugs.c et al. in order to
support the TSA mitigation.

Co-developed-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
14 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
arch/x86/Kconfig
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kvm/svm/vmenter.S
drivers/base/cpu.c
include/linux/cpu.h

diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 1468609052c7a31c58812d54c0a7df149c20e120..97e695efa95995afe8bd1525d094c740b080c545 100644
@@ -526,6 +526,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/spectre_v1
                /sys/devices/system/cpu/vulnerabilities/spectre_v2
                /sys/devices/system/cpu/vulnerabilities/srbds
+               /sys/devices/system/cpu/vulnerabilities/tsa
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
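
Note: the new "tsa" entry is read like any other file under /sys/devices/system/cpu/vulnerabilities. A minimal userspace sketch of such a check (illustration only, not part of this patch):

    #include <stdio.h>

    int main(void)
    {
            /* File added by the hunk above; absent on kernels without this patch */
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");
            char line[128];

            if (!f) {
                    perror("tsa");
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* e.g. "Mitigation: Clear CPU buffers" */
            fclose(f);
            return 0;
    }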
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6938c8cd7a6f6298b94e69f6b7de9566e5a9cfb3..eaeabff9beff6afd045d461b2d854481cc8081aa 100644
                        If not specified, "default" is used. In this case,
                        the RNG's choice is left to each individual trust source.
 
+       tsa=            [X86] Control mitigation for Transient Scheduler
+                       Attacks on AMD CPUs. Search the following in your
+                       favourite search engine for more details:
+
+                       "Technical guidance for mitigating transient scheduler
+                       attacks".
+
+                       off             - disable the mitigation
+                       on              - enable the mitigation (default)
+                       user            - mitigate only user/kernel transitions
+                       vm              - mitigate only guest/host transitions
+
+
        tsc=            Disable clocksource stability checks for TSC.
                        Format: <string>
                        [x86] reliable: mark tsc clocksource as reliable, this
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8e66bb4433512428a80160251608563fed6faea9..1da950b1d41ac9203c8dba0dfd5cbbdab10ccab3 100644
@@ -2586,6 +2586,15 @@ config MITIGATION_ITS
          disabled, mitigation cannot be enabled via cmdline.
          See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
 
+config MITIGATION_TSA
+       bool "Mitigate Transient Scheduler Attacks"
+       depends on CPU_SUP_AMD
+       default y
+       help
+         Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
+         security vulnerability on AMD CPUs which can lead to forwarding of
+         invalid info to subsequent instructions and thus can affect their
+         timing and thereby cause a leakage.
 endif
 
 config ARCH_HAS_ADD_PAGES
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 37639a2d9c34fce02a08443772ac8386768965ac..c976bbf909e0c9f0b0e8702ebeaadcb101e761ad 100644
@@ -98,4 +98,16 @@ extern u64 x86_read_arch_cap_msr(void);
 
 extern struct cpumask cpus_stop_mask;
 
+union zen_patch_rev {
+       struct {
+               __u32 rev        : 8,
+                     stepping   : 4,
+                     model      : 4,
+                     __reserved : 4,
+                     ext_model  : 4,
+                     ext_fam    : 8;
+       };
+       __u32 ucode_rev;
+};
+
 #endif /* _ASM_X86_CPU_H */
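
Note: union zen_patch_rev overlays the 32-bit microcode patch level, so shifting ucode_rev right by 8 drops the revision byte and leaves the per-SKU prefix that amd_check_tsa_microcode() (added to amd.c later in this patch) switches on. A small userspace sketch of the layout, assuming the usual LSB-first bitfield allocation used on x86:

    #include <stdio.h>
    #include <stdint.h>

    /* Same layout as the kernel's union zen_patch_rev above */
    union zen_patch_rev {
            struct {
                    uint32_t rev        : 8,
                             stepping   : 4,
                             model      : 4,
                             __reserved : 4,
                             ext_model  : 4,
                             ext_fam    : 8;
            };
            uint32_t ucode_rev;
    };

    int main(void)
    {
            union zen_patch_rev p;

            /* One of the minimum revisions listed in amd_check_tsa_microcode() */
            p.ucode_rev = 0x0a704108;

            /* Prints "prefix 0xa7041, rev 0x08": the prefix is the switch label,
             * the low byte is the revision actually compared against. */
            printf("prefix 0x%x, rev 0x%02x\n", p.ucode_rev >> 8, p.rev);
            return 0;
    }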
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 28edef597282e5b3a1b5c97bc35873f28b52b802..1c71f947b426a66a92b1097ea3e4d319fd7b22a8 100644
 #define X86_FEATURE_SME_COHERENT       (19*32+10) /* "" AMD hardware-enforced cache coherency */
 
 #define X86_FEATURE_AUTOIBRS           (20*32+ 8) /* "" Automatic IBRS */
+#define X86_FEATURE_VERW_CLEAR         (20*32+ 10) /* "" The memory form of VERW mitigates TSA */
 #define X86_FEATURE_SBPB               (20*32+27) /* "" Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE                (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
 #define X86_FEATURE_SRSO_NO            (20*32+29) /* "" CPU is not affected by SRSO */
 #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
 #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */
 
+#define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO          (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
+
 /*
  * BUG word(s)
  */
 #define X86_BUG_IBPB_NO_RET            X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
 #define X86_BUG_ITS                    X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
 #define X86_BUG_ITS_NATIVE_ONLY                X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
+#define X86_BUG_TSA                    X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 209dce2c79b74891a9b53a16c12441d874028145..2c6020729dd145d9a25e1a83ebedba45470f8400 100644
@@ -80,7 +80,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
 static inline void __mwaitx(unsigned long eax, unsigned long ebx,
                            unsigned long ecx)
 {
-       /* No MDS buffer clear as this is AMD/HYGON only */
+       /* No need for TSA buffer clearing on AMD */
 
        /* "mwaitx %eax, %ebx, %ecx;" */
        asm volatile(".byte 0x0f, 0x01, 0xfb;"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 3713b6dab7f447a701bd0725524a9ad909672b71..c77a65a3e5f14a278986db394c8fadeb29220197 100644
  * CFLAGS.ZF.
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
-.macro CLEAR_CPU_BUFFERS
-       ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
+.macro __CLEAR_CPU_BUFFERS feature
+       ALTERNATIVE "jmp .Lskip_verw_\@", "", \feature
 #ifdef CONFIG_X86_64
        verw x86_verw_sel(%rip)
 #else
 .Lskip_verw_\@:
 .endm
 
+#define CLEAR_CPU_BUFFERS \
+       __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
+
+#define VM_CLEAR_CPU_BUFFERS \
+       __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
+
 #ifdef CONFIG_X86_64
 .macro CLEAR_BRANCH_HISTORY
        ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@@ -462,7 +468,7 @@ static __always_inline void x86_clear_cpu_buffers(void)
 
 /**
  * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
- * vulnerability
+ * and TSA vulnerabilities.
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
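
Note: both wrappers added above expand to the same memory-operand VERW; they differ only in which feature bit patches the skip-jump out (for TSA, tsa_select_mitigation() in bugs.c sets X86_FEATURE_CLEAR_CPU_BUF for the user/kernel exit paths and X86_FEATURE_CLEAR_CPU_BUF_VM for the VMRUN path). A hedged C-level sketch of the instruction itself, mirroring the x86_clear_cpu_buffers() helper in this same header:

    static inline void verw_clear_cpu_buffers_sketch(void)
    {
            /*
             * Placeholder selector for illustration; the kernel points VERW at
             * its own x86_verw_sel variable.  As noted above, only the memory
             * operand form of VERW clears the CPU buffers.
             */
            static const unsigned short sel = 0;

            asm volatile("verw %[sel]" : : [sel] "m" (sel) : "cc");
    }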
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9ac93b4ba67b4957757c0aff1c253104eee096b4..3e3679709e903222c79c4a588b5d49b8cb1ff46e 100644
@@ -553,6 +553,61 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
+static bool amd_check_tsa_microcode(void)
+{
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+       union zen_patch_rev p;
+       u32 min_rev = 0;
+
+       p.ext_fam       = c->x86 - 0xf;
+       p.model         = c->x86_model;
+       p.stepping      = c->x86_stepping;
+
+       if (c->x86 == 0x19) {
+               switch (p.ucode_rev >> 8) {
+               case 0xa0011:   min_rev = 0x0a0011d7; break;
+               case 0xa0012:   min_rev = 0x0a00123b; break;
+               case 0xa0082:   min_rev = 0x0a00820d; break;
+               case 0xa1011:   min_rev = 0x0a10114c; break;
+               case 0xa1012:   min_rev = 0x0a10124c; break;
+               case 0xa1081:   min_rev = 0x0a108109; break;
+               case 0xa2010:   min_rev = 0x0a20102e; break;
+               case 0xa2012:   min_rev = 0x0a201211; break;
+               case 0xa4041:   min_rev = 0x0a404108; break;
+               case 0xa5000:   min_rev = 0x0a500012; break;
+               case 0xa6012:   min_rev = 0x0a60120a; break;
+               case 0xa7041:   min_rev = 0x0a704108; break;
+               case 0xa7052:   min_rev = 0x0a705208; break;
+               case 0xa7080:   min_rev = 0x0a708008; break;
+               case 0xa70c0:   min_rev = 0x0a70c008; break;
+               case 0xaa002:   min_rev = 0x0aa00216; break;
+               default:
+                       pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
+                                __func__, p.ucode_rev, c->microcode);
+                       return false;
+               }
+       }
+
+       if (!min_rev)
+               return false;
+
+       return c->microcode >= min_rev;
+}
+
+static void tsa_init(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+               return;
+
+       if (c->x86 == 0x19) {
+               if (amd_check_tsa_microcode())
+                       setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
+       } else {
+               setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
+               setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
+       }
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -663,6 +718,9 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;
 
+
+       tsa_init(c);
+
                return;
 
 clear_all:
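
Note: the switch above encodes, per patch-level prefix, the first microcode revision that provides the VERW-based clearing. As a worked example, a family 0x19 part whose current patch level were 0x0a704105 would match prefix 0xa7041 but sit below the 0x0a704108 minimum, so amd_check_tsa_microcode() returns false, X86_FEATURE_VERW_CLEAR stays clear, and tsa_select_mitigation() in bugs.c later downgrades the mitigation to "Vulnerable: Clear CPU buffers attempted, no microcode". Families other than 0x19 take the else branch of tsa_init() on bare metal and get TSA_SQ_NO/TSA_L1_NO forced, i.e. they are reported as not affected.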
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d0a5df576e902205389e3ec6d5248cfc340ff740..dba5262e15094523fbd014a595fc33c0bd62e7d9 100644
@@ -49,6 +49,7 @@ static void __init l1d_flush_select_mitigation(void);
 static void __init gds_select_mitigation(void);
 static void __init srso_select_mitigation(void);
 static void __init its_select_mitigation(void);
+static void __init tsa_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -184,6 +185,7 @@ void __init cpu_select_mitigations(void)
        srso_select_mitigation();
        gds_select_mitigation();
        its_select_mitigation();
+       tsa_select_mitigation();
 }
 
 /*
@@ -2039,6 +2041,94 @@ static void update_mds_branch_idle(void)
 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "Transient Scheduler Attacks: " fmt
+
+enum tsa_mitigations {
+       TSA_MITIGATION_NONE,
+       TSA_MITIGATION_UCODE_NEEDED,
+       TSA_MITIGATION_USER_KERNEL,
+       TSA_MITIGATION_VM,
+       TSA_MITIGATION_FULL,
+};
+
+static const char * const tsa_strings[] = {
+       [TSA_MITIGATION_NONE]           = "Vulnerable",
+       [TSA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
+       [TSA_MITIGATION_USER_KERNEL]    = "Mitigation: Clear CPU buffers: user/kernel boundary",
+       [TSA_MITIGATION_VM]             = "Mitigation: Clear CPU buffers: VM",
+       [TSA_MITIGATION_FULL]           = "Mitigation: Clear CPU buffers",
+};
+
+static enum tsa_mitigations tsa_mitigation __ro_after_init =
+       IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
+
+static int __init tsa_parse_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off"))
+               tsa_mitigation = TSA_MITIGATION_NONE;
+       else if (!strcmp(str, "on"))
+               tsa_mitigation = TSA_MITIGATION_FULL;
+       else if (!strcmp(str, "user"))
+               tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
+       else if (!strcmp(str, "vm"))
+               tsa_mitigation = TSA_MITIGATION_VM;
+       else
+               pr_err("Ignoring unknown tsa=%s option.\n", str);
+
+       return 0;
+}
+early_param("tsa", tsa_parse_cmdline);
+
+static void __init tsa_select_mitigation(void)
+{
+       if (tsa_mitigation == TSA_MITIGATION_NONE)
+               return;
+
+       if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
+               tsa_mitigation = TSA_MITIGATION_NONE;
+               return;
+       }
+
+       if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
+               tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
+
+       switch (tsa_mitigation) {
+       case TSA_MITIGATION_USER_KERNEL:
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+               break;
+
+       case TSA_MITIGATION_VM:
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+               break;
+
+       case TSA_MITIGATION_UCODE_NEEDED:
+               if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+                       goto out;
+
+               pr_notice("Forcing mitigation on in a VM\n");
+
+               /*
+                * On the off-chance that microcode has been updated
+                * on the host, enable the mitigation in the guest just
+                * in case.
+                */
+               fallthrough;
+       case TSA_MITIGATION_FULL:
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+               break;
+       default:
+               break;
+       }
+
+out:
+       pr_info("%s\n", tsa_strings[tsa_mitigation]);
+}
+
 void cpu_bugs_smt_update(void)
 {
        mutex_lock(&spec_ctrl_mutex);
@@ -2092,6 +2182,24 @@ void cpu_bugs_smt_update(void)
                break;
        }
 
+       switch (tsa_mitigation) {
+       case TSA_MITIGATION_USER_KERNEL:
+       case TSA_MITIGATION_VM:
+       case TSA_MITIGATION_FULL:
+       case TSA_MITIGATION_UCODE_NEEDED:
+               /*
+                * TSA-SQ can potentially lead to info leakage between
+                * SMT threads.
+                */
+               if (sched_smt_active())
+                       static_branch_enable(&cpu_buf_idle_clear);
+               else
+                       static_branch_disable(&cpu_buf_idle_clear);
+               break;
+       case TSA_MITIGATION_NONE:
+               break;
+       }
+
        mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -3026,6 +3134,11 @@ static ssize_t srso_show_state(char *buf)
                          boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
 }
 
+static ssize_t tsa_show_state(char *buf)
+{
+       return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -3087,6 +3200,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_ITS:
                return its_show_state(buf);
 
+       case X86_BUG_TSA:
+               return tsa_show_state(buf);
+
        default:
                break;
        }
@@ -3171,4 +3287,9 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
 }
+
+ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
+}
 #endif
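
Note: pulling the pieces above together, with X86_BUG_TSA set, mitigations not globally disabled and the VERW_CLEAR microcode present, the tsa= option (documented in kernel-parameters.txt earlier in this patch) maps to forced capabilities and sysfs output as follows:

    tsa=           forced capabilities                    /sys/.../vulnerabilities/tsa
    off            none                                   Vulnerable
    user           CLEAR_CPU_BUF                          Mitigation: Clear CPU buffers: user/kernel boundary
    vm             CLEAR_CPU_BUF_VM                       Mitigation: Clear CPU buffers: VM
    on (default)   CLEAR_CPU_BUF + CLEAR_CPU_BUF_VM       Mitigation: Clear CPU buffers

Without the VERW_CLEAR microcode the selection falls back to TSA_MITIGATION_UCODE_NEEDED, which forces both capabilities only when running as a guest (the host microcode may already be current) and reports "Vulnerable: Clear CPU buffers attempted, no microcode".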
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 722eac51beae6f35d8c83327fbac4b282b7f1357..9c849a4160cda7cbe89a79ed63664e58dd26ed20 100644
@@ -1256,6 +1256,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define ITS            BIT(8)
 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
 #define ITS_NATIVE_ONLY        BIT(9)
+/* CPU is affected by Transient Scheduler Attacks */
+#define TSA            BIT(10)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
@@ -1303,7 +1305,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_AMD(0x16, RETBLEED),
        VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
        VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
-       VULNBL_AMD(0x19, SRSO),
+       VULNBL_AMD(0x19, SRSO | TSA),
        {}
 };
 
@@ -1508,6 +1510,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                        setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
        }
 
+       if (c->x86_vendor == X86_VENDOR_AMD) {
+               if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
+                   !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
+                       if (cpu_matches(cpu_vuln_blacklist, TSA) ||
+                           /* Enable bug on Zen guests to allow for live migration. */
+                           (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
+                               setup_force_cpu_bug(X86_BUG_TSA);
+               }
+       }
+
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
 
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 28c357cf7c75ee04660416c13bdad14c4ce0fbc3..b9e39c9eb274c133287f596aa1d5b171ccd08143 100644
@@ -45,6 +45,8 @@ static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
        { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
        { X86_FEATURE_MBA,              CPUID_EBX,  6, 0x80000008, 0 },
+       { X86_FEATURE_TSA_SQ_NO,        CPUID_ECX,  1, 0x80000021, 0 },
+       { X86_FEATURE_TSA_L1_NO,        CPUID_ECX,  2, 0x80000021, 0 },
        { X86_FEATURE_PERFMON_V2,       CPUID_EAX,  0, 0x80000022, 0 },
        { X86_FEATURE_AMD_LBR_V2,       CPUID_EAX,  1, 0x80000022, 0 },
        { X86_FEATURE_AMD_LBR_PMC_FREEZE,       CPUID_EAX,  2, 0x80000022, 0 },
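
Note: the two scattered bits added above correspond to CPUID leaf 0x80000021, ECX bits 1 (TSA_SQ_NO) and 2 (TSA_L1_NO). A quick userspace probe using the compiler's cpuid helper (illustration only; on affected parts that do not enumerate these bits, the kernel relies on the family-0x19 blacklist entry added to common.c earlier in this patch):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

            /* Leaf 0x80000021, subleaf 0: ECX bit 1 = TSA_SQ_NO, bit 2 = TSA_L1_NO */
            if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx)) {
                    puts("CPUID leaf 0x80000021 not available");
                    return 1;
            }

            printf("TSA_SQ_NO=%u TSA_L1_NO=%u\n", (ecx >> 1) & 1, (ecx >> 2) & 1);
            return 0;
    }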
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 5be9a63f09fff2a6ac9e23f61758377924aa572b..42824f9b06a25dbc43d63b49cdf1579fd36d989a 100644
@@ -166,6 +166,9 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
        mov VCPU_RDI(%_ASM_DI), %_ASM_DI
 
+       /* Clobbers EFLAGS.ZF */
+       VM_CLEAR_CPU_BUFFERS
+
        /* Enter guest mode */
        sti
 
@@ -336,6 +339,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
        mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
 
+       /* Clobbers EFLAGS.ZF */
+       VM_CLEAR_CPU_BUFFERS
+
        /* Enter guest mode */
        sti
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 27aff2503765ba7e2f07937ac5d2119979c00378..d68c60f357640ce8273c973bf4022b72a7d8338e 100644
@@ -601,6 +601,11 @@ ssize_t __weak cpu_show_indirect_target_selection(struct device *dev,
        return sysfs_emit(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
@@ -616,6 +621,7 @@ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
@@ -633,6 +639,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_spec_rstack_overflow.attr,
        &dev_attr_reg_file_data_sampling.attr,
        &dev_attr_indirect_target_selection.attr,
+       &dev_attr_tsa.attr,
        NULL
 };
 
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 186e0e0f2e40c5196dbf845a7852c48cc51eb4aa..3d3ceccf822450a0bec69e25d6c1712d0c061525 100644
@@ -78,6 +78,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
                                               struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
                                                  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,