From 957863fa975116190c9e97bfd880526df46e1c29 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 8 Jul 2025 18:03:59 +0200 Subject: [PATCH] 6.1-stable patches added patches: kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch x86-bugs-rename-mds-machinery-to-something-more-generic.patch x86-process-move-the-buffer-clearing-before-monitor.patch --- ...m-advertise-tsa-cpuid-bits-to-guests.patch | 89 +++ queue-6.1/series | 4 + ...ansient-scheduler-attacks-mitigation.patch | 518 ++++++++++++++++++ ...-machinery-to-something-more-generic.patch | 234 ++++++++ ...e-the-buffer-clearing-before-monitor.patch | 104 ++++ 5 files changed, 949 insertions(+) create mode 100644 queue-6.1/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch create mode 100644 queue-6.1/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch create mode 100644 queue-6.1/x86-bugs-rename-mds-machinery-to-something-more-generic.patch create mode 100644 queue-6.1/x86-process-move-the-buffer-clearing-before-monitor.patch diff --git a/queue-6.1/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch b/queue-6.1/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch new file mode 100644 index 0000000000..6affa5495c --- /dev/null +++ b/queue-6.1/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch @@ -0,0 +1,89 @@ +From 3c1c8ba250b67e41ce096b65f34cf2b9aea44ea7 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Wed, 11 Sep 2024 11:00:50 +0200 +Subject: KVM: SVM: Advertise TSA CPUID bits to guests + +From: "Borislav Petkov (AMD)" + +Commit 31272abd5974b38ba312e9cf2ec2f09f9dd7dcba upstream. + +Synthesize the TSA CPUID feature bits for guests. Set TSA_{SQ,L1}_NO on +unaffected machines. + +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kvm/cpuid.c | 9 ++++++++- + arch/x86/kvm/reverse_cpuid.h | 8 ++++++++ + 2 files changed, 16 insertions(+), 1 deletion(-) + +--- a/arch/x86/kvm/cpuid.c ++++ b/arch/x86/kvm/cpuid.c +@@ -758,6 +758,12 @@ void kvm_set_cpu_caps(void) + if (cpu_feature_enabled(X86_FEATURE_SRSO_NO)) + kvm_cpu_cap_set(X86_FEATURE_SRSO_NO); + ++ kvm_cpu_cap_mask(CPUID_8000_0021_EAX, F(VERW_CLEAR)); ++ ++ kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX, ++ F(TSA_SQ_NO) | F(TSA_L1_NO) ++ ); ++ + /* + * Hide RDTSCP and RDPID if either feature is reported as supported but + * probing MSR_TSC_AUX failed. 
This is purely a sanity check and +@@ -1243,7 +1249,7 @@ static inline int __do_cpuid_func(struct + entry->eax = entry->ebx = entry->ecx = entry->edx = 0; + break; + case 0x80000021: +- entry->ebx = entry->ecx = entry->edx = 0; ++ entry->ebx = entry->edx = 0; + /* + * Pass down these bits: + * EAX 0 NNDBP, Processor ignores nested data breakpoints +@@ -1259,6 +1265,7 @@ static inline int __do_cpuid_func(struct + entry->eax |= BIT(2); + if (!static_cpu_has_bug(X86_BUG_NULL_SEG)) + entry->eax |= BIT(6); ++ cpuid_entry_override(entry, CPUID_8000_0021_ECX); + break; + /*Add support for Centaur's CPUID instruction*/ + case 0xC0000000: +--- a/arch/x86/kvm/reverse_cpuid.h ++++ b/arch/x86/kvm/reverse_cpuid.h +@@ -14,6 +14,7 @@ + enum kvm_only_cpuid_leafs { + CPUID_12_EAX = NCAPINTS, + CPUID_7_2_EDX, ++ CPUID_8000_0021_ECX, + NR_KVM_CPU_CAPS, + + NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, +@@ -45,6 +46,10 @@ enum kvm_only_cpuid_leafs { + #define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4) + #define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5) + ++/* CPUID level 0x80000021 (ECX) */ ++#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1) ++#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2) ++ + struct cpuid_reg { + u32 function; + u32 index; +@@ -71,6 +76,7 @@ static const struct cpuid_reg reverse_cp + [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, + [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, + [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, ++ [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX}, + }; + + /* +@@ -107,6 +113,8 @@ static __always_inline u32 __feature_tra + KVM_X86_TRANSLATE_FEATURE(SGX2); + KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); + KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); ++ KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO); ++ KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO); + default: + return x86_feature; + } diff --git a/queue-6.1/series b/queue-6.1/series index cf35a9dc15..46d6c3c296 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -75,3 +75,7 @@ logitech-c-270-even-more-broken.patch platform-x86-think-lmi-create-ksets-consecutively.patch platform-x86-think-lmi-fix-kobject-cleanup.patch usb-typec-displayport-fix-potential-deadlock.patch +x86-bugs-rename-mds-machinery-to-something-more-generic.patch +x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch +kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch +x86-process-move-the-buffer-clearing-before-monitor.patch diff --git a/queue-6.1/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch b/queue-6.1/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch new file mode 100644 index 0000000000..2045d6341b --- /dev/null +++ b/queue-6.1/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch @@ -0,0 +1,518 @@ +From 4e155de21ab99124e98272454461a5e94a3c542a Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Wed, 11 Sep 2024 10:53:08 +0200 +Subject: x86/bugs: Add a Transient Scheduler Attacks mitigation + +From: "Borislav Petkov (AMD)" + +Commit d8010d4ba43e9f790925375a7de100604a5e2dba upstream. + +Add the required features detection glue to bugs.c et all in order to +support the TSA mitigation. 
+ +Co-developed-by: Kim Phillips +Signed-off-by: Kim Phillips +Signed-off-by: Borislav Petkov (AMD) +Reviewed-by: Pawan Gupta +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/ABI/testing/sysfs-devices-system-cpu | 1 + Documentation/admin-guide/kernel-parameters.txt | 13 ++ + arch/x86/Kconfig | 9 + + arch/x86/include/asm/cpu.h | 12 ++ + arch/x86/include/asm/cpufeatures.h | 6 + + arch/x86/include/asm/mwait.h | 2 + arch/x86/include/asm/nospec-branch.h | 12 +- + arch/x86/kernel/cpu/amd.c | 58 ++++++++++ + arch/x86/kernel/cpu/bugs.c | 121 +++++++++++++++++++++ + arch/x86/kernel/cpu/common.c | 14 ++ + arch/x86/kernel/cpu/scattered.c | 2 + arch/x86/kvm/svm/vmenter.S | 6 + + drivers/base/cpu.c | 2 + include/linux/cpu.h | 1 + 14 files changed, 254 insertions(+), 5 deletions(-) + +--- a/Documentation/ABI/testing/sysfs-devices-system-cpu ++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu +@@ -526,6 +526,7 @@ What: /sys/devices/system/cpu/vulnerabi + /sys/devices/system/cpu/vulnerabilities/spectre_v1 + /sys/devices/system/cpu/vulnerabilities/spectre_v2 + /sys/devices/system/cpu/vulnerabilities/srbds ++ /sys/devices/system/cpu/vulnerabilities/tsa + /sys/devices/system/cpu/vulnerabilities/tsx_async_abort + Date: January 2018 + Contact: Linux kernel mailing list +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -6400,6 +6400,19 @@ + If not specified, "default" is used. In this case, + the RNG's choice is left to each individual trust source. + ++ tsa= [X86] Control mitigation for Transient Scheduler ++ Attacks on AMD CPUs. Search the following in your ++ favourite search engine for more details: ++ ++ "Technical guidance for mitigating transient scheduler ++ attacks". ++ ++ off - disable the mitigation ++ on - enable the mitigation (default) ++ user - mitigate only user/kernel transitions ++ vm - mitigate only guest/host transitions ++ ++ + tsc= Disable clocksource stability checks for TSC. + Format: + [x86] reliable: mark tsc clocksource as reliable, this +--- a/arch/x86/Kconfig ++++ b/arch/x86/Kconfig +@@ -2586,6 +2586,15 @@ config MITIGATION_ITS + disabled, mitigation cannot be enabled via cmdline. + See + ++config MITIGATION_TSA ++ bool "Mitigate Transient Scheduler Attacks" ++ depends on CPU_SUP_AMD ++ default y ++ help ++ Enable mitigation for Transient Scheduler Attacks. TSA is a hardware ++ security vulnerability on AMD CPUs which can lead to forwarding of ++ invalid info to subsequent instructions and thus can affect their ++ timing and thereby cause a leakage. 
+ endif + + config ARCH_HAS_ADD_PAGES +--- a/arch/x86/include/asm/cpu.h ++++ b/arch/x86/include/asm/cpu.h +@@ -98,4 +98,16 @@ extern u64 x86_read_arch_cap_msr(void); + + extern struct cpumask cpus_stop_mask; + ++union zen_patch_rev { ++ struct { ++ __u32 rev : 8, ++ stepping : 4, ++ model : 4, ++ __reserved : 4, ++ ext_model : 4, ++ ext_fam : 8; ++ }; ++ __u32 ucode_rev; ++}; ++ + #endif /* _ASM_X86_CPU_H */ +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -430,6 +430,7 @@ + #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ + + #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ ++#define X86_FEATURE_VERW_CLEAR (20*32+ 10) /* "" The memory form of VERW mitigates TSA */ + #define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ + #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ + #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ +@@ -447,6 +448,10 @@ + #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ + #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */ + ++#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */ ++#define X86_FEATURE_TSA_L1_NO (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */ ++#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */ ++ + /* + * BUG word(s) + */ +@@ -498,4 +503,5 @@ + #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ + #define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */ + #define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */ ++#define X86_BUG_TSA X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */ + #endif /* _ASM_X86_CPUFEATURES_H */ +--- a/arch/x86/include/asm/mwait.h ++++ b/arch/x86/include/asm/mwait.h +@@ -80,7 +80,7 @@ static inline void __mwait(unsigned long + static inline void __mwaitx(unsigned long eax, unsigned long ebx, + unsigned long ecx) + { +- /* No MDS buffer clear as this is AMD/HYGON only */ ++ /* No need for TSA buffer clearing on AMD */ + + /* "mwaitx %eax, %ebx, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xfb;" +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -208,8 +208,8 @@ + * CFLAGS.ZF. + * Note: Only the memory operand variant of VERW clears the CPU buffers. + */ +-.macro CLEAR_CPU_BUFFERS +- ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF ++.macro __CLEAR_CPU_BUFFERS feature ++ ALTERNATIVE "jmp .Lskip_verw_\@", "", \feature + #ifdef CONFIG_X86_64 + verw x86_verw_sel(%rip) + #else +@@ -223,6 +223,12 @@ + .Lskip_verw_\@: + .endm + ++#define CLEAR_CPU_BUFFERS \ ++ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF ++ ++#define VM_CLEAR_CPU_BUFFERS \ ++ __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM ++ + #ifdef CONFIG_X86_64 + .macro CLEAR_BRANCH_HISTORY + ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP +@@ -462,7 +468,7 @@ static __always_inline void x86_clear_cp + + /** + * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS +- * vulnerability ++ * and TSA vulnerabilities. 
+ * + * Clear CPU buffers if the corresponding static key is enabled + */ +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -553,6 +553,61 @@ static void early_init_amd_mc(struct cpu + #endif + } + ++static bool amd_check_tsa_microcode(void) ++{ ++ struct cpuinfo_x86 *c = &boot_cpu_data; ++ union zen_patch_rev p; ++ u32 min_rev = 0; ++ ++ p.ext_fam = c->x86 - 0xf; ++ p.model = c->x86_model; ++ p.stepping = c->x86_stepping; ++ ++ if (c->x86 == 0x19) { ++ switch (p.ucode_rev >> 8) { ++ case 0xa0011: min_rev = 0x0a0011d7; break; ++ case 0xa0012: min_rev = 0x0a00123b; break; ++ case 0xa0082: min_rev = 0x0a00820d; break; ++ case 0xa1011: min_rev = 0x0a10114c; break; ++ case 0xa1012: min_rev = 0x0a10124c; break; ++ case 0xa1081: min_rev = 0x0a108109; break; ++ case 0xa2010: min_rev = 0x0a20102e; break; ++ case 0xa2012: min_rev = 0x0a201211; break; ++ case 0xa4041: min_rev = 0x0a404108; break; ++ case 0xa5000: min_rev = 0x0a500012; break; ++ case 0xa6012: min_rev = 0x0a60120a; break; ++ case 0xa7041: min_rev = 0x0a704108; break; ++ case 0xa7052: min_rev = 0x0a705208; break; ++ case 0xa7080: min_rev = 0x0a708008; break; ++ case 0xa70c0: min_rev = 0x0a70c008; break; ++ case 0xaa002: min_rev = 0x0aa00216; break; ++ default: ++ pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n", ++ __func__, p.ucode_rev, c->microcode); ++ return false; ++ } ++ } ++ ++ if (!min_rev) ++ return false; ++ ++ return c->microcode >= min_rev; ++} ++ ++static void tsa_init(struct cpuinfo_x86 *c) ++{ ++ if (cpu_has(c, X86_FEATURE_HYPERVISOR)) ++ return; ++ ++ if (c->x86 == 0x19) { ++ if (amd_check_tsa_microcode()) ++ setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR); ++ } else { ++ setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO); ++ setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO); ++ } ++} ++ + static void bsp_init_amd(struct cpuinfo_x86 *c) + { + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { +@@ -663,6 +718,9 @@ static void early_detect_mem_encrypt(str + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_sev; + ++ ++ tsa_init(c); ++ + return; + + clear_all: +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -49,6 +49,7 @@ static void __init l1d_flush_select_miti + static void __init gds_select_mitigation(void); + static void __init srso_select_mitigation(void); + static void __init its_select_mitigation(void); ++static void __init tsa_select_mitigation(void); + + /* The base value of the SPEC_CTRL MSR without task-specific bits set */ + u64 x86_spec_ctrl_base; +@@ -184,6 +185,7 @@ void __init cpu_select_mitigations(void) + srso_select_mitigation(); + gds_select_mitigation(); + its_select_mitigation(); ++ tsa_select_mitigation(); + } + + /* +@@ -2039,6 +2041,94 @@ static void update_mds_branch_idle(void) + #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" + #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. 
See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" + ++#undef pr_fmt ++#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt ++ ++enum tsa_mitigations { ++ TSA_MITIGATION_NONE, ++ TSA_MITIGATION_UCODE_NEEDED, ++ TSA_MITIGATION_USER_KERNEL, ++ TSA_MITIGATION_VM, ++ TSA_MITIGATION_FULL, ++}; ++ ++static const char * const tsa_strings[] = { ++ [TSA_MITIGATION_NONE] = "Vulnerable", ++ [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: Clear CPU buffers attempted, no microcode", ++ [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", ++ [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", ++ [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", ++}; ++ ++static enum tsa_mitigations tsa_mitigation __ro_after_init = ++ IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE; ++ ++static int __init tsa_parse_cmdline(char *str) ++{ ++ if (!str) ++ return -EINVAL; ++ ++ if (!strcmp(str, "off")) ++ tsa_mitigation = TSA_MITIGATION_NONE; ++ else if (!strcmp(str, "on")) ++ tsa_mitigation = TSA_MITIGATION_FULL; ++ else if (!strcmp(str, "user")) ++ tsa_mitigation = TSA_MITIGATION_USER_KERNEL; ++ else if (!strcmp(str, "vm")) ++ tsa_mitigation = TSA_MITIGATION_VM; ++ else ++ pr_err("Ignoring unknown tsa=%s option.\n", str); ++ ++ return 0; ++} ++early_param("tsa", tsa_parse_cmdline); ++ ++static void __init tsa_select_mitigation(void) ++{ ++ if (tsa_mitigation == TSA_MITIGATION_NONE) ++ return; ++ ++ if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) { ++ tsa_mitigation = TSA_MITIGATION_NONE; ++ return; ++ } ++ ++ if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) ++ tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; ++ ++ switch (tsa_mitigation) { ++ case TSA_MITIGATION_USER_KERNEL: ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); ++ break; ++ ++ case TSA_MITIGATION_VM: ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); ++ break; ++ ++ case TSA_MITIGATION_UCODE_NEEDED: ++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) ++ goto out; ++ ++ pr_notice("Forcing mitigation on in a VM\n"); ++ ++ /* ++ * On the off-chance that microcode has been updated ++ * on the host, enable the mitigation in the guest just ++ * in case. ++ */ ++ fallthrough; ++ case TSA_MITIGATION_FULL: ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); ++ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); ++ break; ++ default: ++ break; ++ } ++ ++out: ++ pr_info("%s\n", tsa_strings[tsa_mitigation]); ++} ++ + void cpu_bugs_smt_update(void) + { + mutex_lock(&spec_ctrl_mutex); +@@ -2092,6 +2182,24 @@ void cpu_bugs_smt_update(void) + break; + } + ++ switch (tsa_mitigation) { ++ case TSA_MITIGATION_USER_KERNEL: ++ case TSA_MITIGATION_VM: ++ case TSA_MITIGATION_FULL: ++ case TSA_MITIGATION_UCODE_NEEDED: ++ /* ++ * TSA-SQ can potentially lead to info leakage between ++ * SMT threads. ++ */ ++ if (sched_smt_active()) ++ static_branch_enable(&cpu_buf_idle_clear); ++ else ++ static_branch_disable(&cpu_buf_idle_clear); ++ break; ++ case TSA_MITIGATION_NONE: ++ break; ++ } ++ + mutex_unlock(&spec_ctrl_mutex); + } + +@@ -3026,6 +3134,11 @@ static ssize_t srso_show_state(char *buf + boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? 
"" : ", no microcode"); + } + ++static ssize_t tsa_show_state(char *buf) ++{ ++ return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); ++} ++ + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, + char *buf, unsigned int bug) + { +@@ -3087,6 +3200,9 @@ static ssize_t cpu_show_common(struct de + case X86_BUG_ITS: + return its_show_state(buf); + ++ case X86_BUG_TSA: ++ return tsa_show_state(buf); ++ + default: + break; + } +@@ -3171,4 +3287,9 @@ ssize_t cpu_show_indirect_target_selecti + { + return cpu_show_common(dev, attr, buf, X86_BUG_ITS); + } ++ ++ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) ++{ ++ return cpu_show_common(dev, attr, buf, X86_BUG_TSA); ++} + #endif +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1256,6 +1256,8 @@ static const __initconst struct x86_cpu_ + #define ITS BIT(8) + /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ + #define ITS_NATIVE_ONLY BIT(9) ++/* CPU is affected by Transient Scheduler Attacks */ ++#define TSA BIT(10) + + static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { + VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), +@@ -1303,7 +1305,7 @@ static const struct x86_cpu_id cpu_vuln_ + VULNBL_AMD(0x16, RETBLEED), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), + VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), +- VULNBL_AMD(0x19, SRSO), ++ VULNBL_AMD(0x19, SRSO | TSA), + {} + }; + +@@ -1508,6 +1510,16 @@ static void __init cpu_set_bug_bits(stru + setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); + } + ++ if (c->x86_vendor == X86_VENDOR_AMD) { ++ if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) || ++ !cpu_has(c, X86_FEATURE_TSA_L1_NO)) { ++ if (cpu_matches(cpu_vuln_blacklist, TSA) || ++ /* Enable bug on Zen guests to allow for live migration. 
*/ ++ (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN))) ++ setup_force_cpu_bug(X86_BUG_TSA); ++ } ++ } ++ + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) + return; + +--- a/arch/x86/kernel/cpu/scattered.c ++++ b/arch/x86/kernel/cpu/scattered.c +@@ -45,6 +45,8 @@ static const struct cpuid_bit cpuid_bits + { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 }, + { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 }, + { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, ++ { X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 }, ++ { X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 }, + { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, + { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, + { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 }, +--- a/arch/x86/kvm/svm/vmenter.S ++++ b/arch/x86/kvm/svm/vmenter.S +@@ -166,6 +166,9 @@ SYM_FUNC_START(__svm_vcpu_run) + #endif + mov VCPU_RDI(%_ASM_DI), %_ASM_DI + ++ /* Clobbers EFLAGS.ZF */ ++ VM_CLEAR_CPU_BUFFERS ++ + /* Enter guest mode */ + sti + +@@ -336,6 +339,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) + mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX + mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX + ++ /* Clobbers EFLAGS.ZF */ ++ VM_CLEAR_CPU_BUFFERS ++ + /* Enter guest mode */ + sti + +--- a/drivers/base/cpu.c ++++ b/drivers/base/cpu.c +@@ -616,6 +616,7 @@ static DEVICE_ATTR(gather_data_sampling, + static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); + static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); + static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); ++static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); + + static struct attribute *cpu_root_vulnerabilities_attrs[] = { + &dev_attr_meltdown.attr, +@@ -633,6 +634,7 @@ static struct attribute *cpu_root_vulner + &dev_attr_spec_rstack_overflow.attr, + &dev_attr_reg_file_data_sampling.attr, + &dev_attr_indirect_target_selection.attr, ++ &dev_attr_tsa.attr, + NULL + }; + +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -78,6 +78,7 @@ extern ssize_t cpu_show_reg_file_data_sa + struct device_attribute *attr, char *buf); + extern ssize_t cpu_show_indirect_target_selection(struct device *dev, + struct device_attribute *attr, char *buf); ++extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf); + + extern __printf(4, 5) + struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/queue-6.1/x86-bugs-rename-mds-machinery-to-something-more-generic.patch b/queue-6.1/x86-bugs-rename-mds-machinery-to-something-more-generic.patch new file mode 100644 index 0000000000..40671f903b --- /dev/null +++ b/queue-6.1/x86-bugs-rename-mds-machinery-to-something-more-generic.patch @@ -0,0 +1,234 @@ +From b6317fa54c8fe28f160cd38e1ed1f66ab2f9e605 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Wed, 11 Sep 2024 05:13:46 +0200 +Subject: x86/bugs: Rename MDS machinery to something more generic + +From: "Borislav Petkov (AMD)" + +Commit f9af88a3d384c8b55beb5dc5483e5da0135fadbd upstream. + +It will be used by other x86 mitigations. + +No functional changes. 
+ +Signed-off-by: Borislav Petkov (AMD) +Reviewed-by: Pawan Gupta +Signed-off-by: Greg Kroah-Hartman +--- + Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst | 4 - + arch/x86/entry/entry.S | 8 +- + arch/x86/include/asm/irqflags.h | 4 - + arch/x86/include/asm/mwait.h | 5 + + arch/x86/include/asm/nospec-branch.h | 29 +++++----- + arch/x86/kernel/cpu/bugs.c | 12 ++-- + arch/x86/kvm/vmx/vmx.c | 2 + 7 files changed, 32 insertions(+), 32 deletions(-) + +--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst ++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst +@@ -157,9 +157,7 @@ This is achieved by using the otherwise + combination with a microcode update. The microcode clears the affected CPU + buffers when the VERW instruction is executed. + +-Kernel reuses the MDS function to invoke the buffer clearing: +- +- mds_clear_cpu_buffers() ++Kernel does the buffer clearing with x86_clear_cpu_buffers(). + + On MDS affected CPUs, the kernel already invokes CPU buffer clear on + kernel/userspace, hypervisor/guest and C-state (idle) transitions. No +--- a/arch/x86/entry/entry.S ++++ b/arch/x86/entry/entry.S +@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb); + + /* + * Define the VERW operand that is disguised as entry code so that +- * it can be referenced with KPTI enabled. This ensure VERW can be ++ * it can be referenced with KPTI enabled. This ensures VERW can be + * used late in exit-to-user path after page tables are switched. + */ + .pushsection .entry.text, "ax" + + .align L1_CACHE_BYTES, 0xcc +-SYM_CODE_START_NOALIGN(mds_verw_sel) ++SYM_CODE_START_NOALIGN(x86_verw_sel) + UNWIND_HINT_EMPTY + ANNOTATE_NOENDBR + .word __KERNEL_DS + .align L1_CACHE_BYTES, 0xcc +-SYM_CODE_END(mds_verw_sel); ++SYM_CODE_END(x86_verw_sel); + /* For KVM */ +-EXPORT_SYMBOL_GPL(mds_verw_sel); ++EXPORT_SYMBOL_GPL(x86_verw_sel); + + .popsection + +--- a/arch/x86/include/asm/irqflags.h ++++ b/arch/x86/include/asm/irqflags.h +@@ -47,13 +47,13 @@ static __always_inline void native_irq_e + + static inline __cpuidle void native_safe_halt(void) + { +- mds_idle_clear_cpu_buffers(); ++ x86_idle_clear_cpu_buffers(); + asm volatile("sti; hlt": : :"memory"); + } + + static inline __cpuidle void native_halt(void) + { +- mds_idle_clear_cpu_buffers(); ++ x86_idle_clear_cpu_buffers(); + asm volatile("hlt": : :"memory"); + } + +--- a/arch/x86/include/asm/mwait.h ++++ b/arch/x86/include/asm/mwait.h +@@ -44,7 +44,7 @@ static inline void __monitorx(const void + + static inline void __mwait(unsigned long eax, unsigned long ecx) + { +- mds_idle_clear_cpu_buffers(); ++ x86_idle_clear_cpu_buffers(); + + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" +@@ -89,7 +89,8 @@ static inline void __mwaitx(unsigned lon + + static inline void __sti_mwait(unsigned long eax, unsigned long ecx) + { +- mds_idle_clear_cpu_buffers(); ++ x86_idle_clear_cpu_buffers(); ++ + /* "mwait %eax, %ecx;" */ + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -202,23 +202,23 @@ + .endm + + /* +- * Macro to execute VERW instruction that mitigate transient data sampling +- * attacks such as MDS. On affected systems a microcode update overloaded VERW +- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. +- * ++ * Macro to execute VERW insns that mitigate transient data sampling ++ * attacks such as MDS or TSA. 
On affected systems a microcode update ++ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers ++ * CFLAGS.ZF. + * Note: Only the memory operand variant of VERW clears the CPU buffers. + */ + .macro CLEAR_CPU_BUFFERS + ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF + #ifdef CONFIG_X86_64 +- verw mds_verw_sel(%rip) ++ verw x86_verw_sel(%rip) + #else + /* + * In 32bit mode, the memory operand must be a %cs reference. The data + * segments may not be usable (vm86 mode), and the stack segment may not + * be flat (ESPFIX32). + */ +- verw %cs:mds_verw_sel ++ verw %cs:x86_verw_sel + #endif + .Lskip_verw_\@: + .endm +@@ -427,24 +427,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_ + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); + DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +-DECLARE_STATIC_KEY_FALSE(mds_idle_clear); ++DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear); + + DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); + + DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); + +-extern u16 mds_verw_sel; ++extern u16 x86_verw_sel; + + #include + + /** +- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability ++ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns + * + * This uses the otherwise unused and obsolete VERW instruction in + * combination with microcode which triggers a CPU buffer flush when the + * instruction is executed. + */ +-static __always_inline void mds_clear_cpu_buffers(void) ++static __always_inline void x86_clear_cpu_buffers(void) + { + static const u16 ds = __KERNEL_DS; + +@@ -461,14 +461,15 @@ static __always_inline void mds_clear_cp + } + + /** +- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability ++ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS ++ * vulnerability + * + * Clear CPU buffers if the corresponding static key is enabled + */ +-static inline void mds_idle_clear_cpu_buffers(void) ++static __always_inline void x86_idle_clear_cpu_buffers(void) + { +- if (static_branch_likely(&mds_idle_clear)) +- mds_clear_cpu_buffers(); ++ if (static_branch_likely(&cpu_buf_idle_clear)) ++ x86_clear_cpu_buffers(); + } + + #endif /* __ASSEMBLY__ */ +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -121,9 +121,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_i + /* Control unconditional IBPB in switch_mm() */ + DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); + +-/* Control MDS CPU buffer clear before idling (halt, mwait) */ +-DEFINE_STATIC_KEY_FALSE(mds_idle_clear); +-EXPORT_SYMBOL_GPL(mds_idle_clear); ++/* Control CPU buffer clear before idling (halt, mwait) */ ++DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear); ++EXPORT_SYMBOL_GPL(cpu_buf_idle_clear); + + /* + * Controls whether l1d flush based mitigations are enabled, +@@ -444,7 +444,7 @@ static void __init mmio_select_mitigatio + * is required irrespective of SMT state. + */ + if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) +- static_branch_enable(&mds_idle_clear); ++ static_branch_enable(&cpu_buf_idle_clear); + + /* + * Check if the system has the right microcode. 
+@@ -2028,10 +2028,10 @@ static void update_mds_branch_idle(void) + return; + + if (sched_smt_active()) { +- static_branch_enable(&mds_idle_clear); ++ static_branch_enable(&cpu_buf_idle_clear); + } else if (mmio_mitigation == MMIO_MITIGATION_OFF || + (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { +- static_branch_disable(&mds_idle_clear); ++ static_branch_disable(&cpu_buf_idle_clear); + } + } + +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -7144,7 +7144,7 @@ static noinstr void vmx_vcpu_enter_exit( + vmx_l1d_flush(vcpu); + else if (static_branch_unlikely(&mmio_stale_data_clear) && + kvm_arch_has_assigned_device(vcpu->kvm)) +- mds_clear_cpu_buffers(); ++ x86_clear_cpu_buffers(); + + vmx_disable_fb_clear(vmx); + diff --git a/queue-6.1/x86-process-move-the-buffer-clearing-before-monitor.patch b/queue-6.1/x86-process-move-the-buffer-clearing-before-monitor.patch new file mode 100644 index 0000000000..47ef0e3952 --- /dev/null +++ b/queue-6.1/x86-process-move-the-buffer-clearing-before-monitor.patch @@ -0,0 +1,104 @@ +From 9685ffdf6a7fa36c0259854aac3440cd699b617d Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Mon, 14 Apr 2025 15:33:19 +0200 +Subject: x86/process: Move the buffer clearing before MONITOR + +From: "Borislav Petkov (AMD)" + +Commit 8e786a85c0a3c0fffae6244733fb576eeabd9dec upstream. + +Move the VERW clearing before the MONITOR so that VERW doesn't disarm it +and the machine never enters C1. + +Original idea by Kim Phillips . + +Suggested-by: Andrew Cooper +Signed-off-by: Borislav Petkov (AMD) +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/mwait.h | 16 +++++++++++----- + arch/x86/kernel/process.c | 15 ++++++++++++--- + 2 files changed, 23 insertions(+), 8 deletions(-) + +--- a/arch/x86/include/asm/mwait.h ++++ b/arch/x86/include/asm/mwait.h +@@ -44,8 +44,6 @@ static inline void __monitorx(const void + + static inline void __mwait(unsigned long eax, unsigned long ecx) + { +- x86_idle_clear_cpu_buffers(); +- + /* "mwait %eax, %ecx;" */ + asm volatile(".byte 0x0f, 0x01, 0xc9;" + :: "a" (eax), "c" (ecx)); +@@ -89,7 +87,6 @@ static inline void __mwaitx(unsigned lon + + static inline void __sti_mwait(unsigned long eax, unsigned long ecx) + { +- x86_idle_clear_cpu_buffers(); + + /* "mwait %eax, %ecx;" */ + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" +@@ -108,6 +105,11 @@ static inline void __sti_mwait(unsigned + */ + static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) + { ++ if (need_resched()) ++ return; ++ ++ x86_idle_clear_cpu_buffers(); ++ + if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { + if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) { + mb(); +@@ -116,9 +118,13 @@ static inline void mwait_idle_with_hints + } + + __monitor((void *)¤t_thread_info()->flags, 0, 0); +- if (!need_resched()) +- __mwait(eax, ecx); ++ if (need_resched()) ++ goto out; ++ ++ __mwait(eax, ecx); + } ++ ++out: + current_clr_polling(); + } + +--- a/arch/x86/kernel/process.c ++++ b/arch/x86/kernel/process.c +@@ -887,6 +887,11 @@ static int prefer_mwait_c1_over_halt(con + */ + static __cpuidle void mwait_idle(void) + { ++ if (need_resched()) ++ return; ++ ++ x86_idle_clear_cpu_buffers(); ++ + if (!current_set_polling_and_test()) { + if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) { + mb(); /* quirk */ +@@ -895,13 +900,17 @@ static __cpuidle void mwait_idle(void) + } + + __monitor((void *)¤t_thread_info()->flags, 0, 0); +- if (!need_resched()) +- __sti_mwait(0, 0); +- else ++ if (need_resched()) { + 
raw_local_irq_enable(); ++ goto out; ++ } ++ ++ __sti_mwait(0, 0); + } else { + raw_local_irq_enable(); + } ++ ++out: + __current_clr_polling(); + } + -- 2.47.2
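
A minimal sketch, assuming a kernel with the queued patches above applied: it reads the new /sys/devices/system/cpu/vulnerabilities/tsa attribute added by the TSA mitigation backport and prints the reported state. The possible strings mirror tsa_strings[] in arch/x86/kernel/cpu/bugs.c ("Vulnerable", "Vulnerable: Clear CPU buffers attempted, no microcode", "Mitigation: Clear CPU buffers", "Mitigation: Clear CPU buffers: user/kernel boundary", "Mitigation: Clear CPU buffers: VM"), and the tsa= command-line values documented above (off/on/user/vm) select which of them is shown.

	/*
	 * Userspace sketch, not part of the queued patches: query the
	 * TSA vulnerability state exposed by this series via sysfs.
	 */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");

		if (!f) {
			/* Attribute absent: kernel without these patches, or non-x86. */
			puts("tsa: attribute not present");
			return 1;
		}

		if (fgets(buf, sizeof(buf), f))
			printf("tsa: %s", buf);	/* sysfs value is newline terminated */

		fclose(f);
		return 0;
	}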