--- /dev/null
+From fb6149207288551c9e3770b30b4a1a7c6c68ea87 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Wed, 11 Sep 2024 10:53:08 +0200
+Subject: x86/bugs: Add a Transient Scheduler Attacks mitigation
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit d8010d4ba43e9f790925375a7de100604a5e2dba upstream.
+
+Add the required feature detection glue to bugs.c et al. in order to
+support the TSA mitigation.
+
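+For reference, below is a minimal userspace model of the new "tsa="
+command line option handling. This is an illustrative sketch only: it
+mirrors the string-to-mitigation mapping and the reporting strings from
+the bugs.c hunk further down, while the kernel-only pieces (the
+setup_force_cpu_cap() calls, the CPUID and microcode revision checks)
+are deliberately left out:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  enum tsa_mitigations {
+          TSA_MITIGATION_NONE,
+          TSA_MITIGATION_UCODE_NEEDED,
+          TSA_MITIGATION_USER_KERNEL,
+          TSA_MITIGATION_VM,
+          TSA_MITIGATION_FULL,
+  };
+
+  static const char * const tsa_strings[] = {
+          [TSA_MITIGATION_NONE]         = "Vulnerable",
+          [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+          [TSA_MITIGATION_USER_KERNEL]  = "Mitigation: Clear CPU buffers: user/kernel boundary",
+          [TSA_MITIGATION_VM]           = "Mitigation: Clear CPU buffers: VM",
+          [TSA_MITIGATION_FULL]         = "Mitigation: Clear CPU buffers",
+  };
+
+  /* Same option names as tsa_parse_cmdline(); an unknown string keeps
+   * the built-in default, which is "full" with CONFIG_MITIGATION_TSA=y. */
+  static enum tsa_mitigations parse_tsa_option(const char *str)
+  {
+          if (!strcmp(str, "off"))
+                  return TSA_MITIGATION_NONE;
+          if (!strcmp(str, "on"))
+                  return TSA_MITIGATION_FULL;
+          if (!strcmp(str, "user"))
+                  return TSA_MITIGATION_USER_KERNEL;
+          if (!strcmp(str, "vm"))
+                  return TSA_MITIGATION_VM;
+          return TSA_MITIGATION_FULL;
+  }
+
+  int main(void)
+  {
+          const char *opts[] = { "off", "on", "user", "vm" };
+          unsigned int i;
+
+          /* Print the reporting string each option maps to. */
+          for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
+                  printf("tsa=%s -> %s\n", opts[i],
+                         tsa_strings[parse_tsa_option(opts[i])]);
+
+          return 0;
+  }
+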
+Co-developed-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 1
+ Documentation/admin-guide/kernel-parameters.txt | 13 ++
+ arch/x86/Kconfig | 9 +
+ arch/x86/include/asm/cpu.h | 13 ++
+ arch/x86/include/asm/cpufeatures.h | 6 +
+ arch/x86/include/asm/mwait.h | 2
+ arch/x86/include/asm/nospec-branch.h | 12 +-
+ arch/x86/kernel/cpu/amd.c | 58 ++++++++++
+ arch/x86/kernel/cpu/bugs.c | 121 +++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c | 14 ++
+ arch/x86/kernel/cpu/scattered.c | 2
+ arch/x86/kvm/svm/vmenter.S | 6 +
+ drivers/base/cpu.c | 2
+ include/linux/cpu.h | 1
+ 14 files changed, 255 insertions(+), 5 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -524,6 +524,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/srbds
++ /sys/devices/system/cpu/vulnerabilities/tsa
+ /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5990,6 +5990,19 @@
+ If not specified, "default" is used. In this case,
+ the RNG's choice is left to each individual trust source.
+
++ tsa= [X86] Control mitigation for Transient Scheduler
++ Attacks on AMD CPUs. Search the following in your
++ favourite search engine for more details:
++
++ "Technical guidance for mitigating transient scheduler
++ attacks".
++
++ off - disable the mitigation
++ on - enable the mitigation (default)
++ user - mitigate only user/kernel transitions
++ vm - mitigate only guest/host transitions
++
++
+ tsc= Disable clocksource stability checks for TSC.
+ Format: <string>
+ [x86] reliable: mark tsc clocksource as reliable, this
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2528,6 +2528,15 @@ config MITIGATION_ITS
+ disabled, mitigation cannot be enabled via cmdline.
+ See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
+
++config MITIGATION_TSA
++ bool "Mitigate Transient Scheduler Attacks"
++ depends on CPU_SUP_AMD
++ default y
++ help
++ Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
++ security vulnerability on AMD CPUs which can lead to forwarding of
++ invalid info to subsequent instructions and thus can affect their
++ timing and thereby cause a leakage.
+ endif
+
+ config ARCH_HAS_ADD_PAGES
+--- a/arch/x86/include/asm/cpu.h
++++ b/arch/x86/include/asm/cpu.h
+@@ -72,4 +72,17 @@ void init_ia32_feat_ctl(struct cpuinfo_x
+ #else
+ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+ #endif
++
++union zen_patch_rev {
++ struct {
++ __u32 rev : 8,
++ stepping : 4,
++ model : 4,
++ __reserved : 4,
++ ext_model : 4,
++ ext_fam : 8;
++ };
++ __u32 ucode_rev;
++};
++
+ #endif /* _ASM_X86_CPU_H */
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -419,6 +419,7 @@
+ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+
+ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
++#define X86_FEATURE_VERW_CLEAR (20*32+ 10) /* "" The memory form of VERW mitigates TSA */
+ #define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
+@@ -435,6 +436,10 @@
+ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+ #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */
+
++#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
++#define X86_FEATURE_TSA_L1_NO (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
++#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
++
+ /*
+ * BUG word(s)
+ */
+@@ -486,4 +491,5 @@
+ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
+ #define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
++#define X86_BUG_TSA X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -79,7 +79,7 @@ static inline void __mwait(unsigned long
+ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+ unsigned long ecx)
+ {
+- /* No MDS buffer clear as this is AMD/HYGON only */
++ /* No need for TSA buffer clearing on AMD */
+
+ /* "mwaitx %eax, %ebx, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xfb;"
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -208,8 +208,8 @@
+ * CFLAGS.ZF.
+ * Note: Only the memory operand variant of VERW clears the CPU buffers.
+ */
+-.macro CLEAR_CPU_BUFFERS
+- ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
++.macro __CLEAR_CPU_BUFFERS feature
++ ALTERNATIVE "jmp .Lskip_verw_\@", "", \feature
+ #ifdef CONFIG_X86_64
+ verw x86_verw_sel(%rip)
+ #else
+@@ -223,6 +223,12 @@
+ .Lskip_verw_\@:
+ .endm
+
++#define CLEAR_CPU_BUFFERS \
++ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
++
++#define VM_CLEAR_CPU_BUFFERS \
++ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
++
+ #ifdef CONFIG_X86_64
+ .macro CLEAR_BRANCH_HISTORY
+ ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
+@@ -464,7 +470,7 @@ static __always_inline void x86_clear_cp
+
+ /**
+ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+- * vulnerability
++ * and TSA vulnerabilities.
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -582,6 +582,61 @@ static void early_init_amd_mc(struct cpu
+ #endif
+ }
+
++static bool amd_check_tsa_microcode(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++ union zen_patch_rev p;
++ u32 min_rev = 0;
++
++ p.ext_fam = c->x86 - 0xf;
++ p.model = c->x86_model;
++ p.stepping = c->x86_stepping;
++
++ if (c->x86 == 0x19) {
++ switch (p.ucode_rev >> 8) {
++ case 0xa0011: min_rev = 0x0a0011d7; break;
++ case 0xa0012: min_rev = 0x0a00123b; break;
++ case 0xa0082: min_rev = 0x0a00820d; break;
++ case 0xa1011: min_rev = 0x0a10114c; break;
++ case 0xa1012: min_rev = 0x0a10124c; break;
++ case 0xa1081: min_rev = 0x0a108109; break;
++ case 0xa2010: min_rev = 0x0a20102e; break;
++ case 0xa2012: min_rev = 0x0a201211; break;
++ case 0xa4041: min_rev = 0x0a404108; break;
++ case 0xa5000: min_rev = 0x0a500012; break;
++ case 0xa6012: min_rev = 0x0a60120a; break;
++ case 0xa7041: min_rev = 0x0a704108; break;
++ case 0xa7052: min_rev = 0x0a705208; break;
++ case 0xa7080: min_rev = 0x0a708008; break;
++ case 0xa70c0: min_rev = 0x0a70c008; break;
++ case 0xaa002: min_rev = 0x0aa00216; break;
++ default:
++ pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
++ __func__, p.ucode_rev, c->microcode);
++ return false;
++ }
++ }
++
++ if (!min_rev)
++ return false;
++
++ return c->microcode >= min_rev;
++}
++
++static void tsa_init(struct cpuinfo_x86 *c)
++{
++ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++ return;
++
++ if (c->x86 == 0x19) {
++ if (amd_check_tsa_microcode())
++ setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
++ } else {
++ setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
++ setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
++ }
++}
++
+ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ {
+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+@@ -687,6 +742,9 @@ static void early_detect_mem_encrypt(str
+ if (!(msr & MSR_K7_HWCR_SMMLOCK))
+ goto clear_sev;
+
++
++ tsa_init(c);
++
+ return;
+
+ clear_all:
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -49,6 +49,7 @@ static void __init l1d_flush_select_miti
+ static void __init gds_select_mitigation(void);
+ static void __init srso_select_mitigation(void);
+ static void __init its_select_mitigation(void);
++static void __init tsa_select_mitigation(void);
+
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -184,6 +185,7 @@ void __init cpu_select_mitigations(void)
+ srso_select_mitigation();
+ gds_select_mitigation();
+ its_select_mitigation();
++ tsa_select_mitigation();
+ }
+
+ /*
+@@ -2039,6 +2041,94 @@ static void update_mds_branch_idle(void)
+ #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+ #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
+
++#undef pr_fmt
++#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt
++
++enum tsa_mitigations {
++ TSA_MITIGATION_NONE,
++ TSA_MITIGATION_UCODE_NEEDED,
++ TSA_MITIGATION_USER_KERNEL,
++ TSA_MITIGATION_VM,
++ TSA_MITIGATION_FULL,
++};
++
++static const char * const tsa_strings[] = {
++ [TSA_MITIGATION_NONE] = "Vulnerable",
++ [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode",
++ [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
++ [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
++ [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
++};
++
++static enum tsa_mitigations tsa_mitigation __ro_after_init =
++ IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
++
++static int __init tsa_parse_cmdline(char *str)
++{
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ tsa_mitigation = TSA_MITIGATION_NONE;
++ else if (!strcmp(str, "on"))
++ tsa_mitigation = TSA_MITIGATION_FULL;
++ else if (!strcmp(str, "user"))
++ tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
++ else if (!strcmp(str, "vm"))
++ tsa_mitigation = TSA_MITIGATION_VM;
++ else
++ pr_err("Ignoring unknown tsa=%s option.\n", str);
++
++ return 0;
++}
++early_param("tsa", tsa_parse_cmdline);
++
++static void __init tsa_select_mitigation(void)
++{
++ if (tsa_mitigation == TSA_MITIGATION_NONE)
++ return;
++
++ if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
++ tsa_mitigation = TSA_MITIGATION_NONE;
++ return;
++ }
++
++ if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
++ tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
++
++ switch (tsa_mitigation) {
++ case TSA_MITIGATION_USER_KERNEL:
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++ break;
++
++ case TSA_MITIGATION_VM:
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++ break;
++
++ case TSA_MITIGATION_UCODE_NEEDED:
++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
++ goto out;
++
++ pr_notice("Forcing mitigation on in a VM\n");
++
++ /*
++ * On the off-chance that microcode has been updated
++ * on the host, enable the mitigation in the guest just
++ * in case.
++ */
++ fallthrough;
++ case TSA_MITIGATION_FULL:
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++ break;
++ default:
++ break;
++ }
++
++out:
++ pr_info("%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ void cpu_bugs_smt_update(void)
+ {
+ mutex_lock(&spec_ctrl_mutex);
+@@ -2092,6 +2182,24 @@ void cpu_bugs_smt_update(void)
+ break;
+ }
+
++ switch (tsa_mitigation) {
++ case TSA_MITIGATION_USER_KERNEL:
++ case TSA_MITIGATION_VM:
++ case TSA_MITIGATION_FULL:
++ case TSA_MITIGATION_UCODE_NEEDED:
++ /*
++ * TSA-SQ can potentially lead to info leakage between
++ * SMT threads.
++ */
++ if (sched_smt_active())
++ static_branch_enable(&cpu_buf_idle_clear);
++ else
++ static_branch_disable(&cpu_buf_idle_clear);
++ break;
++ case TSA_MITIGATION_NONE:
++ break;
++ }
++
+ mutex_unlock(&spec_ctrl_mutex);
+ }
+
+@@ -3026,6 +3134,11 @@ static ssize_t srso_show_state(char *buf
+ boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+ }
+
++static ssize_t tsa_show_state(char *buf)
++{
++ return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+@@ -3087,6 +3200,9 @@ static ssize_t cpu_show_common(struct de
+ case X86_BUG_ITS:
+ return its_show_state(buf);
+
++ case X86_BUG_TSA:
++ return tsa_show_state(buf);
++
+ default:
+ break;
+ }
+@@ -3171,4 +3287,9 @@ ssize_t cpu_show_indirect_target_selecti
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+ }
++
++ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1146,6 +1146,8 @@ static const __initconst struct x86_cpu_
+ #define ITS BIT(8)
+ /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+ #define ITS_NATIVE_ONLY BIT(9)
++/* CPU is affected by Transient Scheduler Attacks */
++#define TSA BIT(10)
+
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+ VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
+@@ -1193,7 +1195,7 @@ static const struct x86_cpu_id cpu_vuln_
+ VULNBL_AMD(0x16, RETBLEED),
+ VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+- VULNBL_AMD(0x19, SRSO),
++ VULNBL_AMD(0x19, SRSO | TSA),
+ {}
+ };
+
+@@ -1398,6 +1400,16 @@ static void __init cpu_set_bug_bits(stru
+ setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+ }
+
++ if (c->x86_vendor == X86_VENDOR_AMD) {
++ if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
++ !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
++ if (cpu_matches(cpu_vuln_blacklist, TSA) ||
++ /* Enable bug on Zen guests to allow for live migration. */
++ (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
++ setup_force_cpu_bug(X86_BUG_TSA);
++ }
++ }
++
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
+
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -44,6 +44,8 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
++ { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
++ { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -77,6 +77,9 @@ SYM_FUNC_START(__svm_vcpu_run)
+ /* "POP" @vmcb to RAX. */
+ pop %_ASM_AX
+
++ /* Clobbers EFLAGS.ZF */
++ VM_CLEAR_CPU_BUFFERS
++
+ /* Enter guest mode */
+ sti
+
+@@ -190,6 +193,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ /* Move @vmcb to RAX. */
+ mov %_ASM_ARG1, %_ASM_AX
+
++ /* Clobbers EFLAGS.ZF */
++ VM_CLEAR_CPU_BUFFERS
++
+ /* Enter guest mode */
+ sti
+
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -616,6 +616,7 @@ static DEVICE_ATTR(gather_data_sampling,
+ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
++static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+@@ -633,6 +634,7 @@ static struct attribute *cpu_root_vulner
+ &dev_attr_spec_rstack_overflow.attr,
+ &dev_attr_reg_file_data_sampling.attr,
+ &dev_attr_indirect_target_selection.attr,
++ &dev_attr_tsa.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -78,6 +78,7 @@ extern ssize_t cpu_show_reg_file_data_sa
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
--- /dev/null
+From a566c5df43db7cbee4f594b0d4db78768bdbe3c3 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Wed, 11 Sep 2024 05:13:46 +0200
+Subject: x86/bugs: Rename MDS machinery to something more generic
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit f9af88a3d384c8b55beb5dc5483e5da0135fadbd upstream.
+
+It will be used by other x86 mitigations.
+
+No functional changes.
+
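+For quick reference, the renames are (new names on the right):
+
+  mds_verw_sel                  ->  x86_verw_sel
+  mds_clear_cpu_buffers()       ->  x86_clear_cpu_buffers()
+  mds_idle_clear_cpu_buffers()  ->  x86_idle_clear_cpu_buffers()
+  mds_idle_clear (static key)   ->  cpu_buf_idle_clear
+
+As a condensed sketch, the idle-path helper reads as follows after this
+patch (lifted from the nospec-branch.h hunk below, other declarations
+elided):
+
+  DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);    /* was: mds_idle_clear */
+
+  static __always_inline void x86_idle_clear_cpu_buffers(void)
+  {
+          if (static_branch_likely(&cpu_buf_idle_clear))
+                  x86_clear_cpu_buffers();  /* was: mds_clear_cpu_buffers() */
+  }
+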
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst | 4 -
+ arch/x86/entry/entry.S | 8 +-
+ arch/x86/include/asm/irqflags.h | 4 -
+ arch/x86/include/asm/mwait.h | 5 +
+ arch/x86/include/asm/nospec-branch.h | 29 +++++-----
+ arch/x86/kernel/cpu/bugs.c | 12 ++--
+ arch/x86/kvm/vmx/vmx.c | 2
+ 7 files changed, 32 insertions(+), 32 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -157,9 +157,7 @@ This is achieved by using the otherwise
+ combination with a microcode update. The microcode clears the affected CPU
+ buffers when the VERW instruction is executed.
+
+-Kernel reuses the MDS function to invoke the buffer clearing:
+-
+- mds_clear_cpu_buffers()
++Kernel does the buffer clearing with x86_clear_cpu_buffers().
+
+ On MDS affected CPUs, the kernel already invokes CPU buffer clear on
+ kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
+
+ /*
+ * Define the VERW operand that is disguised as entry code so that
+- * it can be referenced with KPTI enabled. This ensure VERW can be
++ * it can be referenced with KPTI enabled. This ensures VERW can be
+ * used late in exit-to-user path after page tables are switched.
+ */
+ .pushsection .entry.text, "ax"
+
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_START_NOALIGN(mds_verw_sel)
++SYM_CODE_START_NOALIGN(x86_verw_sel)
+ UNWIND_HINT_EMPTY
+ ANNOTATE_NOENDBR
+ .word __KERNEL_DS
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_END(mds_verw_sel);
++SYM_CODE_END(x86_verw_sel);
+ /* For KVM */
+-EXPORT_SYMBOL_GPL(mds_verw_sel);
++EXPORT_SYMBOL_GPL(x86_verw_sel);
+
+ .popsection
+
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -47,13 +47,13 @@ static __always_inline void native_irq_e
+
+ static inline __cpuidle void native_safe_halt(void)
+ {
+- mds_idle_clear_cpu_buffers();
++ x86_idle_clear_cpu_buffers();
+ asm volatile("sti; hlt": : :"memory");
+ }
+
+ static inline __cpuidle void native_halt(void)
+ {
+- mds_idle_clear_cpu_buffers();
++ x86_idle_clear_cpu_buffers();
+ asm volatile("hlt": : :"memory");
+ }
+
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -43,7 +43,7 @@ static inline void __monitorx(const void
+
+ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+- mds_idle_clear_cpu_buffers();
++ x86_idle_clear_cpu_buffers();
+
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+@@ -88,7 +88,8 @@ static inline void __mwaitx(unsigned lon
+
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+- mds_idle_clear_cpu_buffers();
++ x86_idle_clear_cpu_buffers();
++
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -202,23 +202,23 @@
+ .endm
+
+ /*
+- * Macro to execute VERW instruction that mitigate transient data sampling
+- * attacks such as MDS. On affected systems a microcode update overloaded VERW
+- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
+- *
++ * Macro to execute VERW insns that mitigate transient data sampling
++ * attacks such as MDS or TSA. On affected systems a microcode update
++ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
++ * CFLAGS.ZF.
+ * Note: Only the memory operand variant of VERW clears the CPU buffers.
+ */
+ .macro CLEAR_CPU_BUFFERS
+ ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
+ #ifdef CONFIG_X86_64
+- verw mds_verw_sel(%rip)
++ verw x86_verw_sel(%rip)
+ #else
+ /*
+ * In 32bit mode, the memory operand must be a %cs reference. The data
+ * segments may not be usable (vm86 mode), and the stack segment may not
+ * be flat (ESPFIX32).
+ */
+- verw %cs:mds_verw_sel
++ verw %cs:x86_verw_sel
+ #endif
+ .Lskip_verw_\@:
+ .endm
+@@ -429,24 +429,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
++DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+
+-extern u16 mds_verw_sel;
++extern u16 x86_verw_sel;
+
+ #include <asm/segment.h>
+
+ /**
+- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
++ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
+ *
+ * This uses the otherwise unused and obsolete VERW instruction in
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+-static __always_inline void mds_clear_cpu_buffers(void)
++static __always_inline void x86_clear_cpu_buffers(void)
+ {
+ static const u16 ds = __KERNEL_DS;
+
+@@ -463,14 +463,15 @@ static __always_inline void mds_clear_cp
+ }
+
+ /**
+- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
++ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
++ * vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+-static inline void mds_idle_clear_cpu_buffers(void)
++static __always_inline void x86_idle_clear_cpu_buffers(void)
+ {
+- if (static_branch_likely(&mds_idle_clear))
+- mds_clear_cpu_buffers();
++ if (static_branch_likely(&cpu_buf_idle_clear))
++ x86_clear_cpu_buffers();
+ }
+
+ #endif /* __ASSEMBLY__ */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -121,9 +121,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_i
+ /* Control unconditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
+-/* Control MDS CPU buffer clear before idling (halt, mwait) */
+-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+-EXPORT_SYMBOL_GPL(mds_idle_clear);
++/* Control CPU buffer clear before idling (halt, mwait) */
++DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
++EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
+
+ /*
+ * Controls whether l1d flush based mitigations are enabled,
+@@ -451,7 +451,7 @@ static void __init mmio_select_mitigatio
+ * is required irrespective of SMT state.
+ */
+ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+- static_branch_enable(&mds_idle_clear);
++ static_branch_enable(&cpu_buf_idle_clear);
+
+ /*
+ * Check if the system has the right microcode.
+@@ -2028,10 +2028,10 @@ static void update_mds_branch_idle(void)
+ return;
+
+ if (sched_smt_active()) {
+- static_branch_enable(&mds_idle_clear);
++ static_branch_enable(&cpu_buf_idle_clear);
+ } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+- static_branch_disable(&mds_idle_clear);
++ static_branch_disable(&cpu_buf_idle_clear);
+ }
+ }
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6771,7 +6771,7 @@ static noinstr void vmx_vcpu_enter_exit(
+ vmx_l1d_flush(vcpu);
+ else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+ kvm_arch_has_assigned_device(vcpu->kvm))
+- mds_clear_cpu_buffers();
++ x86_clear_cpu_buffers();
+
+ vmx_disable_fb_clear(vmx);
+