6.15-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 8 Jul 2025 16:04:22 +0000 (18:04 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 8 Jul 2025 16:04:22 +0000 (18:04 +0200)
added patches:
kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch
kvm-x86-sort-cpuid_8000_0021_eax-leaf-bits-properly.patch
x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch
x86-bugs-rename-mds-machinery-to-something-more-generic.patch
x86-microcode-amd-add-tsa-microcode-shas.patch
x86-process-move-the-buffer-clearing-before-monitor.patch

queue-6.15/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch [new file with mode: 0644]
queue-6.15/kvm-x86-sort-cpuid_8000_0021_eax-leaf-bits-properly.patch [new file with mode: 0644]
queue-6.15/series
queue-6.15/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch [new file with mode: 0644]
queue-6.15/x86-bugs-rename-mds-machinery-to-something-more-generic.patch [new file with mode: 0644]
queue-6.15/x86-microcode-amd-add-tsa-microcode-shas.patch [new file with mode: 0644]
queue-6.15/x86-process-move-the-buffer-clearing-before-monitor.patch [new file with mode: 0644]

diff --git a/queue-6.15/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch b/queue-6.15/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch
new file mode 100644 (file)
index 0000000..a3c09ef
--- /dev/null
@@ -0,0 +1,94 @@
+From d9214006b89ff652c3d01ad184b74235b4efaecc Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Wed, 11 Sep 2024 11:00:50 +0200
+Subject: KVM: SVM: Advertise TSA CPUID bits to guests
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit 31272abd5974b38ba312e9cf2ec2f09f9dd7dcba upstream.
+
+Synthesize the TSA CPUID feature bits for guests. Set TSA_{SQ,L1}_NO on
+unaffected machines.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kvm_host.h |    1 +
+ arch/x86/kvm/cpuid.c            |   10 +++++++++-
+ arch/x86/kvm/reverse_cpuid.h    |    7 +++++++
+ 3 files changed, 17 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -756,6 +756,7 @@ enum kvm_only_cpuid_leafs {
+       CPUID_8000_0022_EAX,
+       CPUID_7_2_EDX,
+       CPUID_24_0_EBX,
++      CPUID_8000_0021_ECX,
+       NR_KVM_CPU_CAPS,
+       NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -1177,6 +1177,8 @@ void kvm_set_cpu_caps(void)
+                */
+               SYNTHESIZED_F(LFENCE_RDTSC),
+               /* SmmPgCfgLock */
++              /* 4: Resv */
++              SYNTHESIZED_F(VERW_CLEAR),
+               F(NULL_SEL_CLR_BASE),
+               /* UpperAddressIgnore */
+               F(AUTOIBRS),
+@@ -1190,6 +1192,11 @@ void kvm_set_cpu_caps(void)
+               F(SRSO_USER_KERNEL_NO),
+       );
++      kvm_cpu_cap_init(CPUID_8000_0021_ECX,
++              SYNTHESIZED_F(TSA_SQ_NO),
++              SYNTHESIZED_F(TSA_L1_NO),
++      );
++
+       kvm_cpu_cap_init(CPUID_8000_0022_EAX,
+               F(PERFMON_V2),
+       );
+@@ -1759,8 +1766,9 @@ static inline int __do_cpuid_func(struct
+               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+               break;
+       case 0x80000021:
+-              entry->ebx = entry->ecx = entry->edx = 0;
++              entry->ebx = entry->edx = 0;
+               cpuid_entry_override(entry, CPUID_8000_0021_EAX);
++              cpuid_entry_override(entry, CPUID_8000_0021_ECX);
+               break;
+       /* AMD Extended Performance Monitoring and Debug */
+       case 0x80000022: {
+--- a/arch/x86/kvm/reverse_cpuid.h
++++ b/arch/x86/kvm/reverse_cpuid.h
+@@ -52,6 +52,10 @@
+ /* CPUID level 0x80000022 (EAX) */
+ #define KVM_X86_FEATURE_PERFMON_V2    KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
++/* CPUID level 0x80000021 (ECX) */
++#define KVM_X86_FEATURE_TSA_SQ_NO     KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
++#define KVM_X86_FEATURE_TSA_L1_NO     KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)
++
+ struct cpuid_reg {
+       u32 function;
+       u32 index;
+@@ -82,6 +86,7 @@ static const struct cpuid_reg reverse_cp
+       [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
+       [CPUID_7_2_EDX]       = {         7, 2, CPUID_EDX},
+       [CPUID_24_0_EBX]      = {      0x24, 0, CPUID_EBX},
++      [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
+ };
+ /*
+@@ -121,6 +126,8 @@ static __always_inline u32 __feature_tra
+       KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
+       KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+       KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
++      KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
++      KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
+       default:
+               return x86_feature;
+       }
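
For reference, a guest can check that these synthesized bits actually show up.
A minimal userspace sketch, assuming GCC's <cpuid.h> helpers; the leaf and bit
positions come from the reverse_cpuid.h hunk above:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* CPUID 0x80000021 ECX: bit 1 = TSA_SQ_NO, bit 2 = TSA_L1_NO */
		if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx)) {
			puts("CPUID leaf 0x80000021 not available");
			return 1;
		}
		printf("TSA_SQ_NO: %u\nTSA_L1_NO: %u\n",
		       (ecx >> 1) & 1, (ecx >> 2) & 1);
		return 0;
	}
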
diff --git a/queue-6.15/kvm-x86-sort-cpuid_8000_0021_eax-leaf-bits-properly.patch b/queue-6.15/kvm-x86-sort-cpuid_8000_0021_eax-leaf-bits-properly.patch
new file mode 100644 (file)
index 0000000..108fe9b
--- /dev/null
@@ -0,0 +1,45 @@
+From e4aba4869cd8ce26d4070b2a6453aef994ebe309 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@alien8.de>
+Date: Mon, 24 Mar 2025 17:06:17 +0100
+Subject: KVM: x86: Sort CPUID_8000_0021_EAX leaf bits properly
+
+From: Borislav Petkov <bp@alien8.de>
+
+Commit 49c140d5af127ef4faf19f06a89a0714edf0316f upstream.
+
+WRMSR_XX_BASE_NS is bit 1 so put it there, add some new bits as
+comments only.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20250324160617.15379-1-bp@kernel.org
+[sean: skip the FSRS/FSRC placeholders to avoid confusion]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -1164,6 +1164,7 @@ void kvm_set_cpu_caps(void)
+       kvm_cpu_cap_init(CPUID_8000_0021_EAX,
+               F(NO_NESTED_DATA_BP),
++              F(WRMSR_XX_BASE_NS),
+               /*
+                * Synthesize "LFENCE is serializing" into the AMD-defined entry
+                * in KVM's supported CPUID, i.e. if the feature is reported as
+@@ -1177,10 +1178,12 @@ void kvm_set_cpu_caps(void)
+               SYNTHESIZED_F(LFENCE_RDTSC),
+               /* SmmPgCfgLock */
+               F(NULL_SEL_CLR_BASE),
++              /* UpperAddressIgnore */
+               F(AUTOIBRS),
+               EMULATED_F(NO_SMM_CTL_MSR),
+               /* PrefetchCtlMsr */
+-              F(WRMSR_XX_BASE_NS),
++              /* GpOnUserCpuid */
++              /* EPSF */
+               SYNTHESIZED_F(SBPB),
+               SYNTHESIZED_F(IBPB_BRTYPE),
+               SYNTHESIZED_F(SRSO_NO),
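
As a cross-check of the sorting above, the leaf's EAX bits can be dumped from
userspace. A sketch, again assuming GCC's <cpuid.h>; the bit positions are
taken from the cpufeatures.h hunk in the TSA mitigation patch later in this
series:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		/* CPUID 0x80000021 EAX bits, in the sorted order above */
		static const struct { int bit; const char *name; } bits[] = {
			{ 0, "NO_NESTED_DATA_BP" },  { 1, "WRMSR_XX_BASE_NS" },
			{ 2, "LFENCE_RDTSC" },       { 5, "VERW_CLEAR" },
			{ 6, "NULL_SEL_CLR_BASE" },  { 8, "AUTOIBRS" },
			{ 9, "NO_SMM_CTL_MSR" },
		};
		unsigned int eax, ebx, ecx, edx, i;

		if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx))
			return 1;
		for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
			printf("%-18s %u\n", bits[i].name,
			       (eax >> bits[i].bit) & 1);
		return 0;
	}
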
diff --git a/queue-6.15/series b/queue-6.15/series
index 4d971527a278b5b8b4ad7fe54550e91c80069b6f..ea6e1dd90be5a369539eee16ea30b1648efcec12 100644 (file)
--- a/queue-6.15/series
@@ -170,3 +170,9 @@ platform-x86-think-lmi-fix-kobject-cleanup.patch
 platform-x86-think-lmi-fix-sysfs-group-cleanup.patch
 usb-typec-displayport-fix-potential-deadlock.patch
 mm-vmalloc-fix-data-race-in-show_numa_info.patch
+x86-bugs-rename-mds-machinery-to-something-more-generic.patch
+x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch
+kvm-x86-sort-cpuid_8000_0021_eax-leaf-bits-properly.patch
+kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch
+x86-microcode-amd-add-tsa-microcode-shas.patch
+x86-process-move-the-buffer-clearing-before-monitor.patch
diff --git a/queue-6.15/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch b/queue-6.15/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch
new file mode 100644 (file)
index 0000000..a8ccdda
--- /dev/null
@@ -0,0 +1,498 @@
+From 03de3b6a2543b0de7a87031bf3fe37f56fe85711 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Wed, 11 Sep 2024 10:53:08 +0200
+Subject: x86/bugs: Add a Transient Scheduler Attacks mitigation
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit d8010d4ba43e9f790925375a7de100604a5e2dba upstream.
+
+Add the required feature detection glue to bugs.c et al. in order to
+support the TSA mitigation.
+
+Co-developed-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu |    1 
+ Documentation/admin-guide/kernel-parameters.txt    |   13 ++
+ arch/x86/Kconfig                                   |    9 +
+ arch/x86/include/asm/cpufeatures.h                 |    5 
+ arch/x86/include/asm/mwait.h                       |    2 
+ arch/x86/include/asm/nospec-branch.h               |   14 +-
+ arch/x86/kernel/cpu/amd.c                          |   44 +++++++
+ arch/x86/kernel/cpu/bugs.c                         |  121 +++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c                       |   14 ++
+ arch/x86/kernel/cpu/scattered.c                    |    2 
+ arch/x86/kvm/svm/vmenter.S                         |    6 +
+ drivers/base/cpu.c                                 |    3 
+ include/linux/cpu.h                                |    1 
+ 13 files changed, 229 insertions(+), 6 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -523,6 +523,7 @@ What:              /sys/devices/system/cpu/vulnerabi
+               /sys/devices/system/cpu/vulnerabilities/spectre_v1
+               /sys/devices/system/cpu/vulnerabilities/spectre_v2
+               /sys/devices/system/cpu/vulnerabilities/srbds
++              /sys/devices/system/cpu/vulnerabilities/tsa
+               /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ Date:         January 2018
+ Contact:      Linux kernel mailing list <linux-kernel@vger.kernel.org>
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -7423,6 +7423,19 @@
+                       having this key zero'ed is acceptable. E.g. in testing
+                       scenarios.
++      tsa=            [X86] Control mitigation for Transient Scheduler
++                      Attacks on AMD CPUs. Search the following in your
++                      favourite search engine for more details:
++
++                      "Technical guidance for mitigating transient scheduler
++                      attacks".
++
++                      off             - disable the mitigation
++                      on              - enable the mitigation (default)
++                      user            - mitigate only user/kernel transitions
++                      vm              - mitigate only guest/host transitions
++
++
+       tsc=            Disable clocksource stability checks for TSC.
+                       Format: <string>
+                       [x86] reliable: mark tsc clocksource as reliable, this
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2723,6 +2723,15 @@ config MITIGATION_ITS
+         disabled, mitigation cannot be enabled via cmdline.
+         See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
++config MITIGATION_TSA
++      bool "Mitigate Transient Scheduler Attacks"
++      depends on CPU_SUP_AMD
++      default y
++      help
++        Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
++        security vulnerability on AMD CPUs which can lead to forwarding of
++        invalid info to subsequent instructions and thus can affect their
++        timing and thereby cause a leakage.
+ endif
+ config ARCH_HAS_ADD_PAGES
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -453,6 +453,7 @@
+ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
+ #define X86_FEATURE_WRMSR_XX_BASE_NS  (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
+ #define X86_FEATURE_LFENCE_RDTSC      (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
++#define X86_FEATURE_VERW_CLEAR                (20*32+ 5) /* The memory form of VERW mitigates TSA */
+ #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
+ #define X86_FEATURE_AUTOIBRS          (20*32+ 8) /* Automatic IBRS */
+ #define X86_FEATURE_NO_SMM_CTL_MSR    (20*32+ 9) /* SMM_CTL MSR is not present */
+@@ -482,6 +483,9 @@
+ #define X86_FEATURE_AMD_WORKLOAD_CLASS        (21*32 + 7) /* Workload Classification */
+ #define X86_FEATURE_PREFER_YMM                (21*32 + 8) /* Avoid ZMM registers due to downclocking */
+ #define X86_FEATURE_INDIRECT_THUNK_ITS        (21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */
++#define X86_FEATURE_TSA_SQ_NO         (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
++#define X86_FEATURE_TSA_L1_NO         (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
++#define X86_FEATURE_CLEAR_CPU_BUF_VM  (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
+ /*
+  * BUG word(s)
+@@ -536,4 +540,5 @@
+ #define X86_BUG_SPECTRE_V2_USER               X86_BUG(1*32 + 5) /* "spectre_v2_user" CPU is affected by Spectre variant 2 attack between user processes */
+ #define X86_BUG_ITS                   X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */
+ #define X86_BUG_ITS_NATIVE_ONLY               X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
++#define X86_BUG_TSA                   X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -79,7 +79,7 @@ static __always_inline void __mwait(unsi
+ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
+                                    unsigned long ecx)
+ {
+-      /* No MDS buffer clear as this is AMD/HYGON only */
++      /* No need for TSA buffer clearing on AMD */
+       /* "mwaitx %eax, %ebx, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xfb;"
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -308,19 +308,25 @@
+  * CFLAGS.ZF.
+  * Note: Only the memory operand variant of VERW clears the CPU buffers.
+  */
+-.macro CLEAR_CPU_BUFFERS
++.macro __CLEAR_CPU_BUFFERS feature
+ #ifdef CONFIG_X86_64
+-      ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
++      ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
+ #else
+       /*
+        * In 32bit mode, the memory operand must be a %cs reference. The data
+        * segments may not be usable (vm86 mode), and the stack segment may not
+        * be flat (ESPFIX32).
+        */
+-      ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
++      ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
+ #endif
+ .endm
++#define CLEAR_CPU_BUFFERS \
++      __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
++
++#define VM_CLEAR_CPU_BUFFERS \
++      __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
++
+ #ifdef CONFIG_X86_64
+ .macro CLEAR_BRANCH_HISTORY
+       ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
+@@ -602,7 +608,7 @@ static __always_inline void x86_clear_cp
+ /**
+  * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+- * vulnerability
++ * and TSA vulnerabilities.
+  *
+  * Clear CPU buffers if the corresponding static key is enabled
+  */
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -375,6 +375,47 @@ static void bsp_determine_snp(struct cpu
+ #endif
+ }
++#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
++      X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
++                          step, step, ucode)
++
++static const struct x86_cpu_id amd_tsa_microcode[] = {
++      ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
++      ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
++      ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
++      {},
++};
++
++static void tsa_init(struct cpuinfo_x86 *c)
++{
++      if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++              return;
++
++      if (cpu_has(c, X86_FEATURE_ZEN3) ||
++          cpu_has(c, X86_FEATURE_ZEN4)) {
++              if (x86_match_min_microcode_rev(amd_tsa_microcode))
++                      setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
++              else
++                      pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
++      } else {
++              setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
++              setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
++      }
++}
++
+ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ {
+       if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+@@ -487,6 +528,9 @@ static void bsp_init_amd(struct cpuinfo_
+       }
+       bsp_determine_snp(c);
++
++      tsa_init(c);
++
+       return;
+ warn:
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -50,6 +50,7 @@ static void __init l1d_flush_select_miti
+ static void __init srso_select_mitigation(void);
+ static void __init gds_select_mitigation(void);
+ static void __init its_select_mitigation(void);
++static void __init tsa_select_mitigation(void);
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -188,6 +189,7 @@ void __init cpu_select_mitigations(void)
+       srso_select_mitigation();
+       gds_select_mitigation();
+       its_select_mitigation();
++      tsa_select_mitigation();
+ }
+ /*
+@@ -2074,6 +2076,94 @@ static void update_mds_branch_idle(void)
+ #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+ #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
++#undef pr_fmt
++#define pr_fmt(fmt)   "Transient Scheduler Attacks: " fmt
++
++enum tsa_mitigations {
++      TSA_MITIGATION_NONE,
++      TSA_MITIGATION_UCODE_NEEDED,
++      TSA_MITIGATION_USER_KERNEL,
++      TSA_MITIGATION_VM,
++      TSA_MITIGATION_FULL,
++};
++
++static const char * const tsa_strings[] = {
++      [TSA_MITIGATION_NONE]           = "Vulnerable",
++      [TSA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
++      [TSA_MITIGATION_USER_KERNEL]    = "Mitigation: Clear CPU buffers: user/kernel boundary",
++      [TSA_MITIGATION_VM]             = "Mitigation: Clear CPU buffers: VM",
++      [TSA_MITIGATION_FULL]           = "Mitigation: Clear CPU buffers",
++};
++
++static enum tsa_mitigations tsa_mitigation __ro_after_init =
++      IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
++
++static int __init tsa_parse_cmdline(char *str)
++{
++      if (!str)
++              return -EINVAL;
++
++      if (!strcmp(str, "off"))
++              tsa_mitigation = TSA_MITIGATION_NONE;
++      else if (!strcmp(str, "on"))
++              tsa_mitigation = TSA_MITIGATION_FULL;
++      else if (!strcmp(str, "user"))
++              tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
++      else if (!strcmp(str, "vm"))
++              tsa_mitigation = TSA_MITIGATION_VM;
++      else
++              pr_err("Ignoring unknown tsa=%s option.\n", str);
++
++      return 0;
++}
++early_param("tsa", tsa_parse_cmdline);
++
++static void __init tsa_select_mitigation(void)
++{
++      if (tsa_mitigation == TSA_MITIGATION_NONE)
++              return;
++
++      if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
++              tsa_mitigation = TSA_MITIGATION_NONE;
++              return;
++      }
++
++      if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
++              tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
++
++      switch (tsa_mitigation) {
++      case TSA_MITIGATION_USER_KERNEL:
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++              break;
++
++      case TSA_MITIGATION_VM:
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++              break;
++
++      case TSA_MITIGATION_UCODE_NEEDED:
++              if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
++                      goto out;
++
++              pr_notice("Forcing mitigation on in a VM\n");
++
++              /*
++               * On the off-chance that microcode has been updated
++               * on the host, enable the mitigation in the guest just
++               * in case.
++               */
++              fallthrough;
++      case TSA_MITIGATION_FULL:
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++              break;
++      default:
++              break;
++      }
++
++out:
++      pr_info("%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ void cpu_bugs_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+@@ -2130,6 +2220,24 @@ void cpu_bugs_smt_update(void)
+               break;
+       }
++      switch (tsa_mitigation) {
++      case TSA_MITIGATION_USER_KERNEL:
++      case TSA_MITIGATION_VM:
++      case TSA_MITIGATION_FULL:
++      case TSA_MITIGATION_UCODE_NEEDED:
++              /*
++               * TSA-SQ can potentially lead to info leakage between
++               * SMT threads.
++               */
++              if (sched_smt_active())
++                      static_branch_enable(&cpu_buf_idle_clear);
++              else
++                      static_branch_disable(&cpu_buf_idle_clear);
++              break;
++      case TSA_MITIGATION_NONE:
++              break;
++      }
++
+       mutex_unlock(&spec_ctrl_mutex);
+ }
+@@ -3078,6 +3186,11 @@ static ssize_t gds_show_state(char *buf)
+       return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
+ }
++static ssize_t tsa_show_state(char *buf)
++{
++      return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
+ {
+@@ -3139,6 +3252,9 @@ static ssize_t cpu_show_common(struct de
+       case X86_BUG_ITS:
+               return its_show_state(buf);
++      case X86_BUG_TSA:
++              return tsa_show_state(buf);
++
+       default:
+               break;
+       }
+@@ -3223,6 +3339,11 @@ ssize_t cpu_show_indirect_target_selecti
+ {
+       return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+ }
++
++ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
++{
++      return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
++}
+ #endif
+ void __warn_thunk(void)
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1232,6 +1232,8 @@ static const __initconst struct x86_cpu_
+ #define ITS           BIT(8)
+ /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+ #define ITS_NATIVE_ONLY       BIT(9)
++/* CPU is affected by Transient Scheduler Attacks */
++#define TSA           BIT(10)
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+       VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,          X86_STEP_MAX,      SRBDS),
+@@ -1279,7 +1281,7 @@ static const struct x86_cpu_id cpu_vuln_
+       VULNBL_AMD(0x16, RETBLEED),
+       VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+       VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+-      VULNBL_AMD(0x19, SRSO),
++      VULNBL_AMD(0x19, SRSO | TSA),
+       VULNBL_AMD(0x1a, SRSO),
+       {}
+ };
+@@ -1492,6 +1494,16 @@ static void __init cpu_set_bug_bits(stru
+                       setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+       }
++      if (c->x86_vendor == X86_VENDOR_AMD) {
++              if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
++                  !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
++                      if (cpu_matches(cpu_vuln_blacklist, TSA) ||
++                          /* Enable bug on Zen guests to allow for live migration. */
++                          (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
++                              setup_force_cpu_bug(X86_BUG_TSA);
++              }
++      }
++
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -49,6 +49,8 @@ static const struct cpuid_bit cpuid_bits
+       { X86_FEATURE_MBA,                      CPUID_EBX,  6, 0x80000008, 0 },
+       { X86_FEATURE_SMBA,                     CPUID_EBX,  2, 0x80000020, 0 },
+       { X86_FEATURE_BMEC,                     CPUID_EBX,  3, 0x80000020, 0 },
++      { X86_FEATURE_TSA_SQ_NO,                CPUID_ECX,  1, 0x80000021, 0 },
++      { X86_FEATURE_TSA_L1_NO,                CPUID_ECX,  2, 0x80000021, 0 },
+       { X86_FEATURE_AMD_WORKLOAD_CLASS,       CPUID_EAX, 22, 0x80000021, 0 },
+       { X86_FEATURE_PERFMON_V2,               CPUID_EAX,  0, 0x80000022, 0 },
+       { X86_FEATURE_AMD_LBR_V2,               CPUID_EAX,  1, 0x80000022, 0 },
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run)
+ #endif
+       mov VCPU_RDI(%_ASM_DI), %_ASM_DI
++      /* Clobbers EFLAGS.ZF */
++      VM_CLEAR_CPU_BUFFERS
++
+       /* Enter guest mode */
+ 3:    vmrun %_ASM_AX
+ 4:
+@@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+       mov SVM_current_vmcb(%rdi), %rax
+       mov KVM_VMCB_pa(%rax), %rax
++      /* Clobbers EFLAGS.ZF */
++      VM_CLEAR_CPU_BUFFERS
++
+       /* Enter guest mode */
+ 1:    vmrun %rax
+ 2:
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -601,6 +601,7 @@ CPU_SHOW_VULN_FALLBACK(gds);
+ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+ CPU_SHOW_VULN_FALLBACK(ghostwrite);
+ CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
++CPU_SHOW_VULN_FALLBACK(tsa);
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -618,6 +619,7 @@ static DEVICE_ATTR(gather_data_sampling,
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
++static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+       &dev_attr_meltdown.attr,
+@@ -636,6 +638,7 @@ static struct attribute *cpu_root_vulner
+       &dev_attr_reg_file_data_sampling.attr,
+       &dev_attr_ghostwrite.attr,
+       &dev_attr_indirect_target_selection.attr,
++      &dev_attr_tsa.attr,
+       NULL
+ };
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -80,6 +80,7 @@ extern ssize_t cpu_show_reg_file_data_sa
+ extern ssize_t cpu_show_ghostwrite(struct device *dev, struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+                                                 struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
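
With this patch applied, the chosen mode is user-visible. A small sketch that
reads the new sysfs attribute (the path and the reported strings come from the
hunks above; the mode itself can be steered at boot with tsa={off,on,user,vm}):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");

		if (!f) {
			perror("tsa attribute missing (kernel lacks this patch?)");
			return 1;
		}
		if (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* e.g. "Mitigation: Clear CPU buffers" */
		fclose(f);
		return 0;
	}
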
diff --git a/queue-6.15/x86-bugs-rename-mds-machinery-to-something-more-generic.patch b/queue-6.15/x86-bugs-rename-mds-machinery-to-something-more-generic.patch
new file mode 100644 (file)
index 0000000..d7ed1ed
--- /dev/null
@@ -0,0 +1,258 @@
+From f52ff28f54f6e767e7fa43b1993eab6a97f1e2e4 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Wed, 11 Sep 2024 05:13:46 +0200
+Subject: x86/bugs: Rename MDS machinery to something more generic
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit f9af88a3d384c8b55beb5dc5483e5da0135fadbd upstream.
+
+It will be used by other x86 mitigations.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst |    4 -
+ Documentation/arch/x86/mds.rst                                  |    8 +-
+ arch/x86/entry/entry.S                                          |    8 +-
+ arch/x86/include/asm/irqflags.h                                 |    4 -
+ arch/x86/include/asm/mwait.h                                    |    5 +
+ arch/x86/include/asm/nospec-branch.h                            |   29 +++++-----
+ arch/x86/kernel/cpu/bugs.c                                      |   12 ++--
+ arch/x86/kvm/vmx/vmx.c                                          |    2 
+ 8 files changed, 36 insertions(+), 36 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -157,9 +157,7 @@ This is achieved by using the otherwise
+ combination with a microcode update. The microcode clears the affected CPU
+ buffers when the VERW instruction is executed.
+-Kernel reuses the MDS function to invoke the buffer clearing:
+-
+-      mds_clear_cpu_buffers()
++Kernel does the buffer clearing with x86_clear_cpu_buffers().
+ On MDS affected CPUs, the kernel already invokes CPU buffer clear on
+ kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+--- a/Documentation/arch/x86/mds.rst
++++ b/Documentation/arch/x86/mds.rst
+@@ -93,7 +93,7 @@ enters a C-state.
+ The kernel provides a function to invoke the buffer clearing:
+-    mds_clear_cpu_buffers()
++    x86_clear_cpu_buffers()
+ Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
+ Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+@@ -185,9 +185,9 @@ Mitigation points
+    idle clearing would be a window dressing exercise and is therefore not
+    activated.
+-   The invocation is controlled by the static key mds_idle_clear which is
+-   switched depending on the chosen mitigation mode and the SMT state of
+-   the system.
++   The invocation is controlled by the static key cpu_buf_idle_clear which is
++   switched depending on the chosen mitigation mode and the SMT state of the
++   system.
+    The buffer clear is only invoked before entering the C-State to prevent
+    that stale data from the idling CPU from spilling to the Hyper-Thread
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -36,20 +36,20 @@ EXPORT_SYMBOL_GPL(write_ibpb);
+ /*
+  * Define the VERW operand that is disguised as entry code so that
+- * it can be referenced with KPTI enabled. This ensure VERW can be
++ * it can be referenced with KPTI enabled. This ensures VERW can be
+  * used late in exit-to-user path after page tables are switched.
+  */
+ .pushsection .entry.text, "ax"
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_START_NOALIGN(mds_verw_sel)
++SYM_CODE_START_NOALIGN(x86_verw_sel)
+       UNWIND_HINT_UNDEFINED
+       ANNOTATE_NOENDBR
+       .word __KERNEL_DS
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_END(mds_verw_sel);
++SYM_CODE_END(x86_verw_sel);
+ /* For KVM */
+-EXPORT_SYMBOL_GPL(mds_verw_sel);
++EXPORT_SYMBOL_GPL(x86_verw_sel);
+ .popsection
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -44,13 +44,13 @@ static __always_inline void native_irq_e
+ static __always_inline void native_safe_halt(void)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
+       asm volatile("sti; hlt": : :"memory");
+ }
+ static __always_inline void native_halt(void)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
+       asm volatile("hlt": : :"memory");
+ }
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -43,7 +43,7 @@ static __always_inline void __monitorx(c
+ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
+       /* "mwait %eax, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xc9;"
+@@ -97,7 +97,8 @@ static __always_inline void __mwaitx(uns
+  */
+ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
++
+       /* "mwait %eax, %ecx;" */
+       asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+                    :: "a" (eax), "c" (ecx));
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -302,22 +302,22 @@
+ .endm
+ /*
+- * Macro to execute VERW instruction that mitigate transient data sampling
+- * attacks such as MDS. On affected systems a microcode update overloaded VERW
+- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
+- *
++ * Macro to execute VERW insns that mitigate transient data sampling
++ * attacks such as MDS or TSA. On affected systems a microcode update
++ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
++ * CFLAGS.ZF.
+  * Note: Only the memory operand variant of VERW clears the CPU buffers.
+  */
+ .macro CLEAR_CPU_BUFFERS
+ #ifdef CONFIG_X86_64
+-      ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
++      ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+ #else
+       /*
+        * In 32bit mode, the memory operand must be a %cs reference. The data
+        * segments may not be usable (vm86 mode), and the stack segment may not
+        * be flat (ESPFIX32).
+        */
+-      ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
++      ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+ #endif
+ .endm
+@@ -567,24 +567,24 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_alway
+ DECLARE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
+-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
++DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+-extern u16 mds_verw_sel;
++extern u16 x86_verw_sel;
+ #include <asm/segment.h>
+ /**
+- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
++ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
+  *
+  * This uses the otherwise unused and obsolete VERW instruction in
+  * combination with microcode which triggers a CPU buffer flush when the
+  * instruction is executed.
+  */
+-static __always_inline void mds_clear_cpu_buffers(void)
++static __always_inline void x86_clear_cpu_buffers(void)
+ {
+       static const u16 ds = __KERNEL_DS;
+@@ -601,14 +601,15 @@ static __always_inline void mds_clear_cp
+ }
+ /**
+- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
++ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
++ * vulnerability
+  *
+  * Clear CPU buffers if the corresponding static key is enabled
+  */
+-static __always_inline void mds_idle_clear_cpu_buffers(void)
++static __always_inline void x86_idle_clear_cpu_buffers(void)
+ {
+-      if (static_branch_likely(&mds_idle_clear))
+-              mds_clear_cpu_buffers();
++      if (static_branch_likely(&cpu_buf_idle_clear))
++              x86_clear_cpu_buffers();
+ }
+ #endif /* __ASSEMBLER__ */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -125,9 +125,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_always
+ DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
+ EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);
+-/* Control MDS CPU buffer clear before idling (halt, mwait) */
+-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+-EXPORT_SYMBOL_GPL(mds_idle_clear);
++/* Control CPU buffer clear before idling (halt, mwait) */
++DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
++EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
+ /*
+  * Controls whether l1d flush based mitigations are enabled,
+@@ -469,7 +469,7 @@ static void __init mmio_select_mitigatio
+        * is required irrespective of SMT state.
+        */
+       if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
+-              static_branch_enable(&mds_idle_clear);
++              static_branch_enable(&cpu_buf_idle_clear);
+       /*
+        * Check if the system has the right microcode.
+@@ -2063,10 +2063,10 @@ static void update_mds_branch_idle(void)
+               return;
+       if (sched_smt_active()) {
+-              static_branch_enable(&mds_idle_clear);
++              static_branch_enable(&cpu_buf_idle_clear);
+       } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+                  (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
+-              static_branch_disable(&mds_idle_clear);
++              static_branch_disable(&cpu_buf_idle_clear);
+       }
+ }
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -7366,7 +7366,7 @@ static noinstr void vmx_vcpu_enter_exit(
+               vmx_l1d_flush(vcpu);
+       else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+                kvm_arch_has_assigned_device(vcpu->kvm))
+-              mds_clear_cpu_buffers();
++              x86_clear_cpu_buffers();
+       vmx_disable_fb_clear(vmx);
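
The renamed helper boils down to one memory-operand VERW; as the comments
above note, only that form triggers the microcode-assisted buffer clear. A
userspace illustration of the same pattern (the kernel uses __KERNEL_DS as the
operand; here the current %ss selector stands in so the snippet actually runs):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t sel;

		/* Fetch a valid selector, then execute the memory-operand
		 * form of VERW on it, analogous to what CLEAR_CPU_BUFFERS
		 * does with x86_verw_sel. Clobbers only EFLAGS.ZF. */
		asm volatile("mov %%ss, %0" : "=r" (sel));
		asm volatile("verw %[sel]" : : [sel] "m" (sel) : "cc");
		puts("memory-operand verw executed");
		return 0;
	}
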
diff --git a/queue-6.15/x86-microcode-amd-add-tsa-microcode-shas.patch b/queue-6.15/x86-microcode-amd-add-tsa-microcode-shas.patch
new file mode 100644 (file)
index 0000000..a74b091
--- /dev/null
@@ -0,0 +1,239 @@
+From 673323d878d7788ae39fe4cbf508e5b6c539276b Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Thu, 27 Mar 2025 12:23:55 +0100
+Subject: x86/microcode/AMD: Add TSA microcode SHAs
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit 2329f250e04d3b8e78b36a68b9880ca7750a07ef upstream.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/amd_shas.c |  112 +++++++++++++++++++++++++++++++
+ 1 file changed, 112 insertions(+)
+
+--- a/arch/x86/kernel/cpu/microcode/amd_shas.c
++++ b/arch/x86/kernel/cpu/microcode/amd_shas.c
+@@ -231,6 +231,13 @@ static const struct patch_digest phashes
+               0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
+       }
+  },
++ { 0xa0011d7, {
++                0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b,
++                0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12,
++                0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2,
++                0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36,
++        }
++ },
+  { 0xa001223, {
+               0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
+               0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
+@@ -294,6 +301,13 @@ static const struct patch_digest phashes
+               0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
+       }
+  },
++ { 0xa00123b, {
++              0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2,
++              0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3,
++              0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28,
++              0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72,
++      }
++ },
+  { 0xa00820c, {
+               0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
+               0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
+@@ -301,6 +315,13 @@ static const struct patch_digest phashes
+               0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
+       }
+  },
++ { 0xa00820d, {
++              0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4,
++              0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca,
++              0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1,
++              0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7,
++      }
++ },
+  { 0xa10113e, {
+               0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
+               0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
+@@ -322,6 +343,13 @@ static const struct patch_digest phashes
+               0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
+       }
+  },
++ { 0xa10114c, {
++              0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64,
++              0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74,
++              0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a,
++              0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0,
++      }
++ },
+  { 0xa10123e, {
+               0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
+               0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
+@@ -343,6 +371,13 @@ static const struct patch_digest phashes
+               0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
+       }
+  },
++ { 0xa10124c, {
++              0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90,
++              0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46,
++              0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc,
++              0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2,
++      }
++ },
+  { 0xa108108, {
+               0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
+               0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
+@@ -350,6 +385,13 @@ static const struct patch_digest phashes
+               0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
+       }
+  },
++ { 0xa108109, {
++              0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa,
++              0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b,
++              0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35,
++              0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59,
++      }
++ },
+  { 0xa20102d, {
+               0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
+               0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
+@@ -357,6 +399,13 @@ static const struct patch_digest phashes
+               0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
+       }
+  },
++ { 0xa20102e, {
++              0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd,
++              0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6,
++              0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5,
++              0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe,
++      }
++ },
+  { 0xa201210, {
+               0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
+               0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
+@@ -364,6 +413,13 @@ static const struct patch_digest phashes
+               0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
+       }
+  },
++ { 0xa201211, {
++              0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95,
++              0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27,
++              0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff,
++              0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27,
++      }
++ },
+  { 0xa404107, {
+               0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
+               0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
+@@ -371,6 +427,13 @@ static const struct patch_digest phashes
+               0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
+       }
+  },
++ { 0xa404108, {
++              0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc,
++              0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb,
++              0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19,
++              0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00,
++      }
++ },
+  { 0xa500011, {
+               0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
+               0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
+@@ -378,6 +441,13 @@ static const struct patch_digest phashes
+               0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
+       }
+  },
++ { 0xa500012, {
++              0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4,
++              0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16,
++              0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f,
++              0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63,
++      }
++ },
+  { 0xa601209, {
+               0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
+               0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
+@@ -385,6 +455,13 @@ static const struct patch_digest phashes
+               0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
+       }
+  },
++ { 0xa60120a, {
++              0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d,
++              0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b,
++              0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d,
++              0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c,
++      }
++ },
+  { 0xa704107, {
+               0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
+               0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
+@@ -392,6 +469,13 @@ static const struct patch_digest phashes
+               0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
+       }
+  },
++ { 0xa704108, {
++              0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93,
++              0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98,
++              0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49,
++              0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a,
++      }
++ },
+  { 0xa705206, {
+               0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
+               0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
+@@ -399,6 +483,13 @@ static const struct patch_digest phashes
+               0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
+       }
+  },
++ { 0xa705208, {
++              0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19,
++              0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2,
++              0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11,
++              0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff,
++      }
++ },
+  { 0xa708007, {
+               0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
+               0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
+@@ -406,6 +497,13 @@ static const struct patch_digest phashes
+               0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
+       }
+  },
++ { 0xa708008, {
++              0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46,
++              0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab,
++              0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16,
++              0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b,
++      }
++ },
+  { 0xa70c005, {
+               0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
+               0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
+@@ -413,6 +511,13 @@ static const struct patch_digest phashes
+               0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
+       }
+  },
++ { 0xa70c008, {
++              0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21,
++              0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e,
++              0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66,
++              0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0,
++      }
++ },
+  { 0xaa00116, {
+               0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
+               0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
+@@ -441,4 +546,11 @@ static const struct patch_digest phashes
+               0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
+       }
+  },
++ { 0xaa00216, {
++              0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5,
++              0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08,
++              0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2,
++              0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1,
++      }
++ },
+ };
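
For orientation, a hypothetical sketch of how such a digest table gates
loading: compute the SHA-256 of a candidate patch and accept it only on an
exact match. The struct layout mirrors the entries above; the names and the
linear scan are illustrative, not the kernel's own code:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct patch_digest {
		uint32_t patch_id;
		uint8_t sha256[32];
	};

	static bool patch_sha_ok(const struct patch_digest *phashes, size_t n,
				 uint32_t patch_id, const uint8_t sha[32])
	{
		size_t i;

		for (i = 0; i < n; i++) {
			if (phashes[i].patch_id == patch_id)
				return memcmp(phashes[i].sha256, sha, 32) == 0;
		}
		return false;	/* unknown patch id: reject */
	}
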
diff --git a/queue-6.15/x86-process-move-the-buffer-clearing-before-monitor.patch b/queue-6.15/x86-process-move-the-buffer-clearing-before-monitor.patch
new file mode 100644 (file)
index 0000000..ae7ad7d
--- /dev/null
@@ -0,0 +1,109 @@
+From 873eb6f53dbc508d1448e64b0555c772804d97d5 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Mon, 14 Apr 2025 15:33:19 +0200
+Subject: x86/process: Move the buffer clearing before MONITOR
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit 8e786a85c0a3c0fffae6244733fb576eeabd9dec upstream.
+
+Move the VERW clearing before the MONITOR so that VERW doesn't disarm it
+and the machine never enters C1.
+
+Original idea by Kim Phillips <kim.phillips@amd.com>.
+
+Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/mwait.h |   25 +++++++++++++++----------
+ arch/x86/kernel/process.c    |   16 ++++++++++++----
+ 2 files changed, 27 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -43,8 +43,6 @@ static __always_inline void __monitorx(c
+ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+-      x86_idle_clear_cpu_buffers();
+-
+       /* "mwait %eax, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xc9;"
+                    :: "a" (eax), "c" (ecx));
+@@ -97,7 +95,6 @@ static __always_inline void __mwaitx(uns
+  */
+ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-      x86_idle_clear_cpu_buffers();
+       /* "mwait %eax, %ecx;" */
+       asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+@@ -116,21 +113,29 @@ static __always_inline void __sti_mwait(
+  */
+ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+ {
++      if (need_resched())
++              return;
++
++      x86_idle_clear_cpu_buffers();
++
+       if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
+               const void *addr = &current_thread_info()->flags;
+               alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
+               __monitor(addr, 0, 0);
+-              if (!need_resched()) {
+-                      if (ecx & 1) {
+-                              __mwait(eax, ecx);
+-                      } else {
+-                              __sti_mwait(eax, ecx);
+-                              raw_local_irq_disable();
+-                      }
++              if (need_resched())
++                      goto out;
++
++              if (ecx & 1) {
++                      __mwait(eax, ecx);
++              } else {
++                      __sti_mwait(eax, ecx);
++                      raw_local_irq_disable();
+               }
+       }
++
++out:
+       current_clr_polling();
+ }
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -912,16 +912,24 @@ static __init bool prefer_mwait_c1_over_
+  */
+ static __cpuidle void mwait_idle(void)
+ {
++      if (need_resched())
++              return;
++
++      x86_idle_clear_cpu_buffers();
++
+       if (!current_set_polling_and_test()) {
+               const void *addr = &current_thread_info()->flags;
+               alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
+               __monitor(addr, 0, 0);
+-              if (!need_resched()) {
+-                      __sti_mwait(0, 0);
+-                      raw_local_irq_disable();
+-              }
++              if (need_resched())
++                      goto out;
++
++              __sti_mwait(0, 0);
++              raw_local_irq_disable();
+       }
++
++out:
+       __current_clr_polling();
+ }
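
The resulting idle flow, in outline (a kernel-context sketch condensing the
mwait_idle() hunk above, not a standalone program): per the commit message, a
VERW issued after MONITOR could disarm the armed monitor, MWAIT would then
return immediately, and the CPU would never enter C1; hoisting the clear
before MONITOR avoids that.

	/* Outline of mwait_idle() after this patch. */
	static __cpuidle void mwait_idle(void)
	{
		if (need_resched())
			return;

		x86_idle_clear_cpu_buffers();	/* VERW first ...      */

		if (!current_set_polling_and_test()) {
			/* ... then arm the monitor ...                */
			__monitor(&current_thread_info()->flags, 0, 0);
			if (need_resched())
				goto out;

			__sti_mwait(0, 0);	/* ... then idle in C1 */
			raw_local_irq_disable();
		}
	out:
		__current_clr_polling();
	}
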