5.10-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2025 13:04:29 +0000 (15:04 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 15 Jul 2025 13:04:29 +0000 (15:04 +0200)
added patches:
kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch
kvm-x86-add-support-for-cpuid-leaf-0x80000021.patch
x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch
x86-bugs-rename-mds-machinery-to-something-more-generic.patch
x86-process-move-the-buffer-clearing-before-monitor.patch

queue-5.10/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch [new file with mode: 0644]
queue-5.10/kvm-x86-add-support-for-cpuid-leaf-0x80000021.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch [new file with mode: 0644]
queue-5.10/x86-bugs-rename-mds-machinery-to-something-more-generic.patch [new file with mode: 0644]
queue-5.10/x86-process-move-the-buffer-clearing-before-monitor.patch [new file with mode: 0644]

diff --git a/queue-5.10/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch b/queue-5.10/kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch
new file mode 100644 (file)
index 0000000..dfd2e2c
--- /dev/null
@@ -0,0 +1,167 @@
+From stable+bounces-161971-greg=kroah.com@vger.kernel.org Tue Jul 15 14:38:03 2025
+From: Borislav Petkov <bp@kernel.org>
+Date: Tue, 15 Jul 2025 14:37:48 +0200
+Subject: KVM: SVM: Advertise TSA CPUID bits to guests
+To: <stable@vger.kernel.org>
+Message-ID: <20250715123749.4610-5-bp@kernel.org>
+
+From: Borislav Petkov <bp@kernel.org>
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit 31272abd5974b38ba312e9cf2ec2f09f9dd7dcba upstream.
+
+Synthesize the TSA CPUID feature bits for guests. Set TSA_{SQ,L1}_NO on
+unaffected machines.
+
+  [ backporting notes: 5.10 doesn't have the KVM-only CPUID leafs so
+    allocate a separate capability leaf for CPUID_8000_0021_ECX to avoid
+    backporting the world and more. ]
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeature.h        |    5 +++--
+ arch/x86/include/asm/cpufeatures.h       |    8 ++++----
+ arch/x86/include/asm/disabled-features.h |    2 +-
+ arch/x86/include/asm/required-features.h |    2 +-
+ arch/x86/kernel/cpu/scattered.c          |    2 --
+ arch/x86/kvm/cpuid.c                     |   16 ++++++++++++++--
+ arch/x86/kvm/cpuid.h                     |    1 +
+ 7 files changed, 24 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -34,6 +34,7 @@ enum cpuid_leafs
+       CPUID_8000_001F_EAX,
+       CPUID_8000_0021_EAX,
+       CPUID_LNX_5,
++      CPUID_8000_0021_ECX,
+       NR_CPUID_WORDS,
+ };
+@@ -97,7 +98,7 @@ extern const char * const x86_bug_flags[
+          CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) ||    \
+          CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) ||    \
+          REQUIRED_MASK_CHECK                                    ||    \
+-         BUILD_BUG_ON_ZERO(NCAPINTS != 22))
++         BUILD_BUG_ON_ZERO(NCAPINTS != 23))
+ #define DISABLED_MASK_BIT_SET(feature_bit)                            \
+        ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK,  0, feature_bit) ||    \
+@@ -123,7 +124,7 @@ extern const char * const x86_bug_flags[
+          CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) ||    \
+          CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) ||    \
+          DISABLED_MASK_CHECK                                    ||    \
+-         BUILD_BUG_ON_ZERO(NCAPINTS != 22))
++         BUILD_BUG_ON_ZERO(NCAPINTS != 23))
+ #define cpu_has(c, bit)                                                       \
+       (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 :  \
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -13,7 +13,7 @@
+ /*
+  * Defines x86 CPU feature bits
+  */
+-#define NCAPINTS                      22         /* N 32-bit words worth of info */
++#define NCAPINTS                      23         /* N 32-bit words worth of info */
+ #define NBUGINTS                      2          /* N 32-bit bug flags */
+ /*
+@@ -412,9 +412,9 @@
+ #define X86_FEATURE_IBPB_BRTYPE               (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO           (20*32+29) /* "" CPU is not affected by SRSO */
+-#define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
+-#define X86_FEATURE_TSA_L1_NO          (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
+-#define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
++#define X86_FEATURE_TSA_SQ_NO          (22*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
++#define X86_FEATURE_TSA_L1_NO          (22*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
++#define X86_FEATURE_CLEAR_CPU_BUF_VM   (22*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
+ /*
+  * BUG word(s)
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -104,6 +104,6 @@
+ #define DISABLED_MASK19       0
+ #define DISABLED_MASK20       0
+ #define DISABLED_MASK21       0
+-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
++#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
+ #endif /* _ASM_X86_DISABLED_FEATURES_H */
+--- a/arch/x86/include/asm/required-features.h
++++ b/arch/x86/include/asm/required-features.h
+@@ -104,6 +104,6 @@
+ #define REQUIRED_MASK19       0
+ #define REQUIRED_MASK20       0
+ #define REQUIRED_MASK21       0
+-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
++#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
+ #endif /* _ASM_X86_REQUIRED_FEATURES_H */
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -42,8 +42,6 @@ static const struct cpuid_bit cpuid_bits
+       { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
+       { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
+       { X86_FEATURE_MBA,              CPUID_EBX,  6, 0x80000008, 0 },
+-      { X86_FEATURE_TSA_SQ_NO,        CPUID_ECX,  1, 0x80000021, 0 },
+-      { X86_FEATURE_TSA_L1_NO,        CPUID_ECX,  2, 0x80000021, 0 },
+       { 0, 0, 0, 0, 0 }
+ };
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -500,6 +500,15 @@ void kvm_set_cpu_caps(void)
+        */
+       kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
++      if (cpu_feature_enabled(X86_FEATURE_VERW_CLEAR))
++              kvm_cpu_cap_set(X86_FEATURE_VERW_CLEAR);
++
++      if (cpu_feature_enabled(X86_FEATURE_TSA_SQ_NO))
++              kvm_cpu_cap_set(X86_FEATURE_TSA_SQ_NO);
++
++      if (cpu_feature_enabled(X86_FEATURE_TSA_L1_NO))
++              kvm_cpu_cap_set(X86_FEATURE_TSA_L1_NO);
++
+       kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
+               F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
+               F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
+@@ -879,18 +888,21 @@ static inline int __do_cpuid_func(struct
+               entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+               break;
+       case 0x80000021:
+-              entry->ebx = entry->ecx = entry->edx = 0;
++              entry->ebx = entry->edx = 0;
+               /*
+                * Pass down these bits:
+                *    EAX      0      NNDBP, Processor ignores nested data breakpoints
+                *    EAX      2      LAS, LFENCE always serializing
++               *    EAX      5      VERW_CLEAR, mitigate TSA
+                *    EAX      6      NSCB, Null selector clear base
+                *
+                * Other defined bits are for MSRs that KVM does not expose:
+                *   EAX      3      SPCL, SMM page configuration lock
+                *   EAX      13     PCMSR, Prefetch control MSR
+                */
+-              entry->eax &= BIT(0) | BIT(2) | BIT(6);
++              cpuid_entry_override(entry, CPUID_8000_0021_EAX);
++              entry->eax &= BIT(0) | BIT(2) | BIT(5) | BIT(6);
++              cpuid_entry_override(entry, CPUID_8000_0021_ECX);
+               break;
+       /*Add support for Centaur's CPUID instruction*/
+       case 0xC0000000:
+--- a/arch/x86/kvm/cpuid.h
++++ b/arch/x86/kvm/cpuid.h
+@@ -64,6 +64,7 @@ static const struct cpuid_reg reverse_cp
+       [CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
+       [CPUID_7_1_EAX]       = {         7, 1, CPUID_EAX},
+       [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
++      [CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
+ };
+ /*
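For reference, the two synthetic bits this backport wires up sit in CPUID leaf 0x80000021, ECX bit 1 (TSA_SQ_NO) and bit 2 (TSA_L1_NO), matching the scattered.c entries moved into the dedicated capability leaf above. A minimal userspace probe of what a guest would see — a hedged sketch, not part of the patch, assuming GCC's <cpuid.h>:

	/* Probe the TSA bits: leaf 0x80000021, ECX[1] = TSA_SQ_NO, ECX[2] = TSA_L1_NO. */
	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* The extended leaf range must reach 0x80000021 first. */
		if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x80000021) {
			puts("CPUID leaf 0x80000021 not present");
			return 1;
		}

		__cpuid(0x80000021, eax, ebx, ecx, edx);
		printf("TSA_SQ_NO: %u\n", (ecx >> 1) & 1);
		printf("TSA_L1_NO: %u\n", (ecx >> 2) & 1);
		return 0;
	}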
diff --git a/queue-5.10/kvm-x86-add-support-for-cpuid-leaf-0x80000021.patch b/queue-5.10/kvm-x86-add-support-for-cpuid-leaf-0x80000021.patch
new file mode 100644 (file)
index 0000000..2b2c47a
--- /dev/null
@@ -0,0 +1,58 @@
+From stable+bounces-161970-greg=kroah.com@vger.kernel.org Tue Jul 15 14:38:02 2025
+From: Borislav Petkov <bp@kernel.org>
+Date: Tue, 15 Jul 2025 14:37:47 +0200
+Subject: KVM: x86: add support for CPUID leaf 0x80000021
+To: <stable@vger.kernel.org>
+Message-ID: <20250715123749.4610-4-bp@kernel.org>
+
+From: Borislav Petkov <bp@kernel.org>
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+Commit 58b3d12c0a860cda34ed9d2378078ea5134e6812 upstream.
+
+CPUID leaf 0x80000021 defines some features (or lack of bugs) of AMD
+processors.  Expose the ones that make sense via KVM_GET_SUPPORTED_CPUID.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/cpuid.c |   19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -810,7 +810,7 @@ static inline int __do_cpuid_func(struct
+               entry->edx = 0;
+               break;
+       case 0x80000000:
+-              entry->eax = min(entry->eax, 0x8000001f);
++              entry->eax = min(entry->eax, 0x80000021);
+               break;
+       case 0x80000001:
+               entry->ebx &= ~GENMASK(27, 16);
+@@ -875,6 +875,23 @@ static inline int __do_cpuid_func(struct
+               if (!boot_cpu_has(X86_FEATURE_SEV))
+                       entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+               break;
++      case 0x80000020:
++              entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
++              break;
++      case 0x80000021:
++              entry->ebx = entry->ecx = entry->edx = 0;
++              /*
++               * Pass down these bits:
++               *    EAX      0      NNDBP, Processor ignores nested data breakpoints
++               *    EAX      2      LAS, LFENCE always serializing
++               *    EAX      6      NSCB, Null selector clear base
++               *
++               * Other defined bits are for MSRs that KVM does not expose:
++               *   EAX      3      SPCL, SMM page configuration lock
++               *   EAX      13     PCMSR, Prefetch control MSR
++               */
++              entry->eax &= BIT(0) | BIT(2) | BIT(6);
++              break;
+       /*Add support for Centaur's CPUID instruction*/
+       case 0xC0000000:
+               /*Just support up to 0xC0000004 now*/
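With this patch the 0x80000021 entry becomes visible to VMMs through KVM_GET_SUPPORTED_CPUID. A rough sketch of a VMM-side consumer — error handling trimmed, and the 128-entry sizing is an assumption, not something the patch prescribes:

	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDONLY);
		int nent = 128;	/* assumed to be enough entries */
		struct kvm_cpuid2 *cpuid;

		cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
		cpuid->nent = nent;

		if (ioctl(kvm, KVM_GET_SUPPORTED_CPUID, cpuid) < 0)
			return 1;

		for (unsigned int i = 0; i < cpuid->nent; i++) {
			struct kvm_cpuid_entry2 *e = &cpuid->entries[i];

			/* The filtered EAX bits and the new ECX bits show up here. */
			if (e->function == 0x80000021)
				printf("0x80000021: eax=%#x ecx=%#x\n", e->eax, e->ecx);
		}
		return 0;
	}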
diff --git a/queue-5.10/series b/queue-5.10/series
index 32ff78e589df4a82363356ef1f2d2a01f74b6fdb..e41cab6d59f609bd1b892cd67b0d0eecc77ae6a8 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -201,3 +201,8 @@ hid-quirks-add-quirk-for-2-chicony-electronics-hp-5m.patch
 input-atkbd-do-not-skip-atkbd_deactivate-when-skipping-atkbd_cmd_getid.patch
 vhost-scsi-protect-vq-log_used-with-vq-mutex.patch
 x86-mm-disable-hugetlb-page-table-sharing-on-32-bit.patch
+x86-bugs-rename-mds-machinery-to-something-more-generic.patch
+x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch
+kvm-x86-add-support-for-cpuid-leaf-0x80000021.patch
+kvm-svm-advertise-tsa-cpuid-bits-to-guests.patch
+x86-process-move-the-buffer-clearing-before-monitor.patch
diff --git a/queue-5.10/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch b/queue-5.10/x86-bugs-add-a-transient-scheduler-attacks-mitigation.patch
new file mode 100644 (file)
index 0000000..a9d15bb
--- /dev/null
@@ -0,0 +1,511 @@
+From stable+bounces-161969-greg=kroah.com@vger.kernel.org Tue Jul 15 14:38:01 2025
+From: Borislav Petkov <bp@kernel.org>
+Date: Tue, 15 Jul 2025 14:37:46 +0200
+Subject: x86/bugs: Add a Transient Scheduler Attacks mitigation
+To: <stable@vger.kernel.org>
+Message-ID: <20250715123749.4610-3-bp@kernel.org>
+
+From: Borislav Petkov <bp@kernel.org>
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit d8010d4ba43e9f790925375a7de100604a5e2dba upstream.
+
+Add the required features detection glue to bugs.c et al. in order to
+support the TSA mitigation.
+
+Co-developed-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Kim Phillips <kim.phillips@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu |    1 
+ Documentation/admin-guide/kernel-parameters.txt    |   13 ++
+ arch/x86/Kconfig                                   |    9 +
+ arch/x86/include/asm/cpu.h                         |   13 ++
+ arch/x86/include/asm/cpufeatures.h                 |    6 +
+ arch/x86/include/asm/mwait.h                       |    2 
+ arch/x86/include/asm/nospec-branch.h               |   12 +-
+ arch/x86/kernel/cpu/amd.c                          |   58 ++++++++++
+ arch/x86/kernel/cpu/bugs.c                         |  121 +++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c                       |   14 ++
+ arch/x86/kernel/cpu/scattered.c                    |    2 
+ arch/x86/kvm/svm/vmenter.S                         |    3 
+ drivers/base/cpu.c                                 |    2 
+ include/linux/cpu.h                                |    1 
+ 14 files changed, 252 insertions(+), 5 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -514,6 +514,7 @@ What:              /sys/devices/system/cpu/vulnerabi
+               /sys/devices/system/cpu/vulnerabilities/spectre_v1
+               /sys/devices/system/cpu/vulnerabilities/spectre_v2
+               /sys/devices/system/cpu/vulnerabilities/srbds
++              /sys/devices/system/cpu/vulnerabilities/tsa
+               /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+ Date:         January 2018
+ Contact:      Linux kernel mailing list <linux-kernel@vger.kernel.org>
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5619,6 +5619,19 @@
+                       See Documentation/admin-guide/mm/transhuge.rst
+                       for more details.
++      tsa=            [X86] Control mitigation for Transient Scheduler
++                      Attacks on AMD CPUs. Search the following in your
++                      favourite search engine for more details:
++
++                      "Technical guidance for mitigating transient scheduler
++                      attacks".
++
++                      off             - disable the mitigation
++                      on              - enable the mitigation (default)
++                      user            - mitigate only user/kernel transitions
++                      vm              - mitigate only guest/host transitions
++
++
+       tsc=            Disable clocksource stability checks for TSC.
+                       Format: <string>
+                       [x86] reliable: mark tsc clocksource as reliable, this
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2532,6 +2532,15 @@ config MITIGATION_ITS
+         disabled, mitigation cannot be enabled via cmdline.
+         See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
++config MITIGATION_TSA
++      bool "Mitigate Transient Scheduler Attacks"
++      depends on CPU_SUP_AMD
++      default y
++      help
++        Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
++        security vulnerability on AMD CPUs which can lead to forwarding of
++        invalid info to subsequent instructions and thus can affect their
++        timing and thereby cause a leakage.
+ endif
+ config ARCH_HAS_ADD_PAGES
+--- a/arch/x86/include/asm/cpu.h
++++ b/arch/x86/include/asm/cpu.h
+@@ -63,4 +63,17 @@ void init_ia32_feat_ctl(struct cpuinfo_x
+ #else
+ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+ #endif
++
++union zen_patch_rev {
++      struct {
++              __u32 rev        : 8,
++                    stepping   : 4,
++                    model      : 4,
++                    __reserved : 4,
++                    ext_model  : 4,
++                    ext_fam    : 8;
++      };
++      __u32 ucode_rev;
++};
++
+ #endif /* _ASM_X86_CPU_H */
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -406,11 +406,16 @@
+ #define X86_FEATURE_SEV_ES            (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+ #define X86_FEATURE_SME_COHERENT      (19*32+10) /* "" AMD hardware-enforced cache coherency */
++#define X86_FEATURE_VERW_CLEAR                (20*32+ 5) /* "" The memory form of VERW mitigates TSA */
+ #define X86_FEATURE_AUTOIBRS          (20*32+ 8) /* "" Automatic IBRS */
+ #define X86_FEATURE_SBPB              (20*32+27) /* "" Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE               (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO           (20*32+29) /* "" CPU is not affected by SRSO */
++#define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
++#define X86_FEATURE_TSA_L1_NO          (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
++#define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
++
+ /*
+  * BUG word(s)
+  */
+@@ -461,4 +466,5 @@
+ #define X86_BUG_IBPB_NO_RET           X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
+ #define X86_BUG_ITS                   X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
+ #define X86_BUG_ITS_NATIVE_ONLY               X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
++#define X86_BUG_TSA                   X86_BUG(1*32 + 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -79,7 +79,7 @@ static inline void __mwait(unsigned long
+ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+                           unsigned long ecx)
+ {
+-      /* No MDS buffer clear as this is AMD/HYGON only */
++      /* No need for TSA buffer clearing on AMD */
+       /* "mwaitx %eax, %ebx, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xfb;"
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -197,8 +197,8 @@
+  * CFLAGS.ZF.
+  * Note: Only the memory operand variant of VERW clears the CPU buffers.
+  */
+-.macro CLEAR_CPU_BUFFERS
+-      ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
++.macro __CLEAR_CPU_BUFFERS feature
++      ALTERNATIVE "jmp .Lskip_verw_\@", "", \feature
+ #ifdef CONFIG_X86_64
+       verw x86_verw_sel(%rip)
+ #else
+@@ -212,6 +212,12 @@
+ .Lskip_verw_\@:
+ .endm
++#define CLEAR_CPU_BUFFERS \
++      __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
++
++#define VM_CLEAR_CPU_BUFFERS \
++      __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
++
+ #else /* __ASSEMBLY__ */
+ #define ANNOTATE_RETPOLINE_SAFE                                       \
+@@ -431,7 +437,7 @@ static __always_inline void x86_clear_cp
+ /**
+  * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
+- * vulnerability
++ * and TSA vulnerabilities.
+  *
+  * Clear CPU buffers if the corresponding static key is enabled
+  */
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -589,6 +589,62 @@ static void early_init_amd_mc(struct cpu
+ #endif
+ }
++static bool amd_check_tsa_microcode(void)
++{
++      struct cpuinfo_x86 *c = &boot_cpu_data;
++      union zen_patch_rev p;
++      u32 min_rev = 0;
++
++      p.ext_fam       = c->x86 - 0xf;
++      p.model         = c->x86_model;
++      p.ext_model     = c->x86_model >> 4;
++      p.stepping      = c->x86_stepping;
++
++      if (c->x86 == 0x19) {
++              switch (p.ucode_rev >> 8) {
++              case 0xa0011:   min_rev = 0x0a0011d7; break;
++              case 0xa0012:   min_rev = 0x0a00123b; break;
++              case 0xa0082:   min_rev = 0x0a00820d; break;
++              case 0xa1011:   min_rev = 0x0a10114c; break;
++              case 0xa1012:   min_rev = 0x0a10124c; break;
++              case 0xa1081:   min_rev = 0x0a108109; break;
++              case 0xa2010:   min_rev = 0x0a20102e; break;
++              case 0xa2012:   min_rev = 0x0a201211; break;
++              case 0xa4041:   min_rev = 0x0a404108; break;
++              case 0xa5000:   min_rev = 0x0a500012; break;
++              case 0xa6012:   min_rev = 0x0a60120a; break;
++              case 0xa7041:   min_rev = 0x0a704108; break;
++              case 0xa7052:   min_rev = 0x0a705208; break;
++              case 0xa7080:   min_rev = 0x0a708008; break;
++              case 0xa70c0:   min_rev = 0x0a70c008; break;
++              case 0xaa002:   min_rev = 0x0aa00216; break;
++              default:
++                      pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
++                               __func__, p.ucode_rev, c->microcode);
++                      return false;
++              }
++      }
++
++      if (!min_rev)
++              return false;
++
++      return c->microcode >= min_rev;
++}
++
++static void tsa_init(struct cpuinfo_x86 *c)
++{
++      if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++              return;
++
++      if (c->x86 == 0x19) {
++              if (amd_check_tsa_microcode())
++                      setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
++      } else {
++              setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
++              setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
++      }
++}
++
+ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ {
+@@ -676,6 +732,8 @@ static void bsp_init_amd(struct cpuinfo_
+       }
+       resctrl_cpu_detect(c);
++
++      tsa_init(c);
+ }
+ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -48,6 +48,7 @@ static void __init srbds_select_mitigati
+ static void __init gds_select_mitigation(void);
+ static void __init srso_select_mitigation(void);
+ static void __init its_select_mitigation(void);
++static void __init tsa_select_mitigation(void);
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -171,6 +172,7 @@ void __init cpu_select_mitigations(void)
+       srso_select_mitigation();
+       gds_select_mitigation();
+       its_select_mitigation();
++      tsa_select_mitigation();
+ }
+ /*
+@@ -1933,6 +1935,94 @@ static void update_mds_branch_idle(void)
+ #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
+ #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
++#undef pr_fmt
++#define pr_fmt(fmt)   "Transient Scheduler Attacks: " fmt
++
++enum tsa_mitigations {
++      TSA_MITIGATION_NONE,
++      TSA_MITIGATION_UCODE_NEEDED,
++      TSA_MITIGATION_USER_KERNEL,
++      TSA_MITIGATION_VM,
++      TSA_MITIGATION_FULL,
++};
++
++static const char * const tsa_strings[] = {
++      [TSA_MITIGATION_NONE]           = "Vulnerable",
++      [TSA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
++      [TSA_MITIGATION_USER_KERNEL]    = "Mitigation: Clear CPU buffers: user/kernel boundary",
++      [TSA_MITIGATION_VM]             = "Mitigation: Clear CPU buffers: VM",
++      [TSA_MITIGATION_FULL]           = "Mitigation: Clear CPU buffers",
++};
++
++static enum tsa_mitigations tsa_mitigation __ro_after_init =
++      IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
++
++static int __init tsa_parse_cmdline(char *str)
++{
++      if (!str)
++              return -EINVAL;
++
++      if (!strcmp(str, "off"))
++              tsa_mitigation = TSA_MITIGATION_NONE;
++      else if (!strcmp(str, "on"))
++              tsa_mitigation = TSA_MITIGATION_FULL;
++      else if (!strcmp(str, "user"))
++              tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
++      else if (!strcmp(str, "vm"))
++              tsa_mitigation = TSA_MITIGATION_VM;
++      else
++              pr_err("Ignoring unknown tsa=%s option.\n", str);
++
++      return 0;
++}
++early_param("tsa", tsa_parse_cmdline);
++
++static void __init tsa_select_mitigation(void)
++{
++      if (tsa_mitigation == TSA_MITIGATION_NONE)
++              return;
++
++      if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
++              tsa_mitigation = TSA_MITIGATION_NONE;
++              return;
++      }
++
++      if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
++              tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
++
++      switch (tsa_mitigation) {
++      case TSA_MITIGATION_USER_KERNEL:
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++              break;
++
++      case TSA_MITIGATION_VM:
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++              break;
++
++      case TSA_MITIGATION_UCODE_NEEDED:
++              if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
++                      goto out;
++
++              pr_notice("Forcing mitigation on in a VM\n");
++
++              /*
++               * On the off-chance that microcode has been updated
++               * on the host, enable the mitigation in the guest just
++               * in case.
++               */
++              fallthrough;
++      case TSA_MITIGATION_FULL:
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
++              setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
++              break;
++      default:
++              break;
++      }
++
++out:
++      pr_info("%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ void cpu_bugs_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+@@ -1986,6 +2076,24 @@ void cpu_bugs_smt_update(void)
+               break;
+       }
++      switch (tsa_mitigation) {
++      case TSA_MITIGATION_USER_KERNEL:
++      case TSA_MITIGATION_VM:
++      case TSA_MITIGATION_FULL:
++      case TSA_MITIGATION_UCODE_NEEDED:
++              /*
++               * TSA-SQ can potentially lead to info leakage between
++               * SMT threads.
++               */
++              if (sched_smt_active())
++                      static_branch_enable(&cpu_buf_idle_clear);
++              else
++                      static_branch_disable(&cpu_buf_idle_clear);
++              break;
++      case TSA_MITIGATION_NONE:
++              break;
++      }
++
+       mutex_unlock(&spec_ctrl_mutex);
+ }
+@@ -2867,6 +2975,11 @@ static ssize_t srso_show_state(char *buf
+                         boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+ }
++static ssize_t tsa_show_state(char *buf)
++{
++      return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
+ {
+@@ -2928,6 +3041,9 @@ static ssize_t cpu_show_common(struct de
+       case X86_BUG_ITS:
+               return its_show_state(buf);
++      case X86_BUG_TSA:
++              return tsa_show_state(buf);
++
+       default:
+               break;
+       }
+@@ -3012,4 +3128,9 @@ ssize_t cpu_show_indirect_target_selecti
+ {
+       return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
+ }
++
++ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
++{
++      return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1139,6 +1139,8 @@ static const __initconst struct x86_cpu_
+ #define ITS           BIT(8)
+ /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+ #define ITS_NATIVE_ONLY       BIT(9)
++/* CPU is affected by Transient Scheduler Attacks */
++#define TSA           BIT(10)
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+       VULNBL_INTEL_STEPPINGS(IVYBRIDGE,       X86_STEPPING_ANY,               SRBDS),
+@@ -1186,7 +1188,7 @@ static const struct x86_cpu_id cpu_vuln_
+       VULNBL_AMD(0x16, RETBLEED),
+       VULNBL_AMD(0x17, RETBLEED | SRSO),
+       VULNBL_HYGON(0x18, RETBLEED | SRSO),
+-      VULNBL_AMD(0x19, SRSO),
++      VULNBL_AMD(0x19, SRSO | TSA),
+       {}
+ };
+@@ -1378,6 +1380,16 @@ static void __init cpu_set_bug_bits(stru
+                       setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+       }
++      if (c->x86_vendor == X86_VENDOR_AMD) {
++              if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
++                  !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
++                      if (cpu_matches(cpu_vuln_blacklist, TSA) ||
++                          /* Enable bug on Zen guests to allow for live migration. */
++                          (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
++                              setup_force_cpu_bug(X86_BUG_TSA);
++              }
++      }
++
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -42,6 +42,8 @@ static const struct cpuid_bit cpuid_bits
+       { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
+       { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
+       { X86_FEATURE_MBA,              CPUID_EBX,  6, 0x80000008, 0 },
++      { X86_FEATURE_TSA_SQ_NO,        CPUID_ECX,  1, 0x80000021, 0 },
++      { X86_FEATURE_TSA_L1_NO,        CPUID_ECX,  2, 0x80000021, 0 },
+       { 0, 0, 0, 0, 0 }
+ };
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -77,6 +77,9 @@ SYM_FUNC_START(__svm_vcpu_run)
+       /* "POP" @vmcb to RAX. */
+       pop %_ASM_AX
++      /* Clobbers EFLAGS.ZF */
++      VM_CLEAR_CPU_BUFFERS
++
+       /* Enter guest mode */
+       sti
+ 1:    vmload %_ASM_AX
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -618,6 +618,7 @@ static DEVICE_ATTR(gather_data_sampling,
+ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
++static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+       &dev_attr_meltdown.attr,
+@@ -635,6 +636,7 @@ static struct attribute *cpu_root_vulner
+       &dev_attr_spec_rstack_overflow.attr,
+       &dev_attr_reg_file_data_sampling.attr,
+       &dev_attr_indirect_target_selection.attr,
++      &dev_attr_tsa.attr,
+       NULL
+ };
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -78,6 +78,7 @@ extern ssize_t cpu_show_reg_file_data_sa
+                                              struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+                                                 struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
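The microcode check above packs the boot CPU's family/model/stepping into the same layout as a microcode patch revision and switches on the top bytes of the result. A worked example of that mapping, as a hedged userspace replica — the bitfield layout is assumed to match the kernel's (GCC, little-endian), and the CPU values are illustrative:

	#include <stdio.h>
	#include <stdint.h>

	union zen_patch_rev {
		struct {
			uint32_t rev        : 8,
				 stepping   : 4,
				 model      : 4,
				 __reserved : 4,
				 ext_model  : 4,
				 ext_fam    : 8;
		};
		uint32_t ucode_rev;
	};

	int main(void)
	{
		/* Illustrative values: family 0x19, model 0x01, stepping 0x1. */
		unsigned int x86 = 0x19, x86_model = 0x01, x86_stepping = 0x1;
		union zen_patch_rev p = { .ucode_rev = 0 };

		p.ext_fam   = x86 - 0xf;	/* 0xa */
		p.model     = x86_model;	/* low nibble only: 4-bit field */
		p.ext_model = x86_model >> 4;
		p.stepping  = x86_stepping;

		/* Prints 0xa0011, the key matched against min_rev 0x0a0011d7 above. */
		printf("switch key: %#x\n", p.ucode_rev >> 8);
		return 0;
	}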
diff --git a/queue-5.10/x86-bugs-rename-mds-machinery-to-something-more-generic.patch b/queue-5.10/x86-bugs-rename-mds-machinery-to-something-more-generic.patch
new file mode 100644 (file)
index 0000000..8462005
--- /dev/null
@@ -0,0 +1,236 @@
+From stable+bounces-161968-greg=kroah.com@vger.kernel.org Tue Jul 15 14:38:06 2025
+From: Borislav Petkov <bp@kernel.org>
+Date: Tue, 15 Jul 2025 14:37:45 +0200
+Subject: x86/bugs: Rename MDS machinery to something more generic
+To: <stable@vger.kernel.org>
+Message-ID: <20250715123749.4610-2-bp@kernel.org>
+
+From: Borislav Petkov <bp@kernel.org>
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit f9af88a3d384c8b55beb5dc5483e5da0135fadbd upstream.
+
+It will be used by other x86 mitigations.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst |    4 -
+ arch/x86/entry/entry.S                                          |    8 +-
+ arch/x86/include/asm/irqflags.h                                 |    4 -
+ arch/x86/include/asm/mwait.h                                    |    5 +
+ arch/x86/include/asm/nospec-branch.h                            |   29 +++++-----
+ arch/x86/kernel/cpu/bugs.c                                      |   12 ++--
+ arch/x86/kvm/vmx/vmx.c                                          |    2 
+ 7 files changed, 32 insertions(+), 32 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -157,9 +157,7 @@ This is achieved by using the otherwise
+ combination with a microcode update. The microcode clears the affected CPU
+ buffers when the VERW instruction is executed.
+-Kernel reuses the MDS function to invoke the buffer clearing:
+-
+-      mds_clear_cpu_buffers()
++Kernel does the buffer clearing with x86_clear_cpu_buffers().
+ On MDS affected CPUs, the kernel already invokes CPU buffer clear on
+ kernel/userspace, hypervisor/guest and C-state (idle) transitions. No
+--- a/arch/x86/entry/entry.S
++++ b/arch/x86/entry/entry.S
+@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
+ /*
+  * Define the VERW operand that is disguised as entry code so that
+- * it can be referenced with KPTI enabled. This ensure VERW can be
++ * it can be referenced with KPTI enabled. This ensures VERW can be
+  * used late in exit-to-user path after page tables are switched.
+  */
+ .pushsection .entry.text, "ax"
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_START_NOALIGN(mds_verw_sel)
++SYM_CODE_START_NOALIGN(x86_verw_sel)
+       UNWIND_HINT_EMPTY
+       ANNOTATE_NOENDBR
+       .word __KERNEL_DS
+ .align L1_CACHE_BYTES, 0xcc
+-SYM_CODE_END(mds_verw_sel);
++SYM_CODE_END(x86_verw_sel);
+ /* For KVM */
+-EXPORT_SYMBOL_GPL(mds_verw_sel);
++EXPORT_SYMBOL_GPL(x86_verw_sel);
+ .popsection
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -56,13 +56,13 @@ static __always_inline void native_irq_e
+ static inline __cpuidle void native_safe_halt(void)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
+       asm volatile("sti; hlt": : :"memory");
+ }
+ static inline __cpuidle void native_halt(void)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
+       asm volatile("hlt": : :"memory");
+ }
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -43,7 +43,7 @@ static inline void __monitorx(const void
+ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
+       /* "mwait %eax, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xc9;"
+@@ -88,7 +88,8 @@ static inline void __mwaitx(unsigned lon
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-      mds_idle_clear_cpu_buffers();
++      x86_idle_clear_cpu_buffers();
++
+       /* "mwait %eax, %ecx;" */
+       asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+                    :: "a" (eax), "c" (ecx));
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -191,23 +191,23 @@
+ .endm
+ /*
+- * Macro to execute VERW instruction that mitigate transient data sampling
+- * attacks such as MDS. On affected systems a microcode update overloaded VERW
+- * instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
+- *
++ * Macro to execute VERW insns that mitigate transient data sampling
++ * attacks such as MDS or TSA. On affected systems a microcode update
++ * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
++ * CFLAGS.ZF.
+  * Note: Only the memory operand variant of VERW clears the CPU buffers.
+  */
+ .macro CLEAR_CPU_BUFFERS
+       ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
+ #ifdef CONFIG_X86_64
+-      verw mds_verw_sel(%rip)
++      verw x86_verw_sel(%rip)
+ #else
+       /*
+        * In 32bit mode, the memory operand must be a %cs reference. The data
+        * segments may not be usable (vm86 mode), and the stack segment may not
+        * be flat (ESPFIX32).
+        */
+-      verw %cs:mds_verw_sel
++      verw %cs:x86_verw_sel
+ #endif
+ .Lskip_verw_\@:
+ .endm
+@@ -398,22 +398,22 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_
+ DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+-DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
++DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
+ DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+-extern u16 mds_verw_sel;
++extern u16 x86_verw_sel;
+ #include <asm/segment.h>
+ /**
+- * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
++ * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
+  *
+  * This uses the otherwise unused and obsolete VERW instruction in
+  * combination with microcode which triggers a CPU buffer flush when the
+  * instruction is executed.
+  */
+-static __always_inline void mds_clear_cpu_buffers(void)
++static __always_inline void x86_clear_cpu_buffers(void)
+ {
+       static const u16 ds = __KERNEL_DS;
+@@ -430,14 +430,15 @@ static __always_inline void mds_clear_cp
+ }
+ /**
+- * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
++ * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
++ * vulnerability
+  *
+  * Clear CPU buffers if the corresponding static key is enabled
+  */
+-static inline void mds_idle_clear_cpu_buffers(void)
++static __always_inline void x86_idle_clear_cpu_buffers(void)
+ {
+-      if (static_branch_likely(&mds_idle_clear))
+-              mds_clear_cpu_buffers();
++      if (static_branch_likely(&cpu_buf_idle_clear))
++              x86_clear_cpu_buffers();
+ }
+ #endif /* __ASSEMBLY__ */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -118,9 +118,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_i
+ /* Control unconditional IBPB in switch_mm() */
+ DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+-/* Control MDS CPU buffer clear before idling (halt, mwait) */
+-DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+-EXPORT_SYMBOL_GPL(mds_idle_clear);
++/* Control CPU buffer clear before idling (halt, mwait) */
++DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
++EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
+ /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
+ DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
+@@ -445,7 +445,7 @@ static void __init mmio_select_mitigatio
+        * is required irrespective of SMT state.
+        */
+       if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+-              static_branch_enable(&mds_idle_clear);
++              static_branch_enable(&cpu_buf_idle_clear);
+       /*
+        * Check if the system has the right microcode.
+@@ -1922,10 +1922,10 @@ static void update_mds_branch_idle(void)
+               return;
+       if (sched_smt_active()) {
+-              static_branch_enable(&mds_idle_clear);
++              static_branch_enable(&cpu_buf_idle_clear);
+       } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
+                  (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+-              static_branch_disable(&mds_idle_clear);
++              static_branch_disable(&cpu_buf_idle_clear);
+       }
+ }
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6810,7 +6810,7 @@ static noinstr void vmx_vcpu_enter_exit(
+               vmx_l1d_flush(vcpu);
+       else if (static_branch_unlikely(&mmio_stale_data_clear) &&
+                kvm_arch_has_assigned_device(vcpu->kvm))
+-              mds_clear_cpu_buffers();
++              x86_clear_cpu_buffers();
+       vmx_disable_fb_clear(vmx);
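The rename carries over the one subtle point in the comments: only the memory-operand form of VERW triggers the microcode-assisted buffer clear. The pattern the renamed x86_clear_cpu_buffers() relies on, as a minimal sketch — the 0x18 selector stands in for __KERNEL_DS and is an assumption here, not taken from the patch:

	/* "verw m16" gets the microcode buffer clear; "verw r16" does not. */
	static inline void clear_cpu_buffers_sketch(void)
	{
		static const unsigned short ds = 0x18;	/* stand-in for __KERNEL_DS */

		asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
	}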
diff --git a/queue-5.10/x86-process-move-the-buffer-clearing-before-monitor.patch b/queue-5.10/x86-process-move-the-buffer-clearing-before-monitor.patch
new file mode 100644 (file)
index 0000000..0fc8624
--- /dev/null
@@ -0,0 +1,108 @@
+From stable+bounces-161972-greg=kroah.com@vger.kernel.org Tue Jul 15 14:38:15 2025
+From: Borislav Petkov <bp@kernel.org>
+Date: Tue, 15 Jul 2025 14:37:49 +0200
+Subject: x86/process: Move the buffer clearing before MONITOR
+To: <stable@vger.kernel.org>
+Message-ID: <20250715123749.4610-6-bp@kernel.org>
+
+From: Borislav Petkov <bp@kernel.org>
+
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+
+Commit 8e786a85c0a3c0fffae6244733fb576eeabd9dec upstream.
+
+Move the VERW clearing before the MONITOR so that VERW doesn't disarm it
+and the machine never enters C1.
+
+Original idea by Kim Phillips <kim.phillips@amd.com>.
+
+Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/mwait.h |   16 +++++++++++-----
+ arch/x86/kernel/process.c    |   15 ++++++++++++---
+ 2 files changed, 23 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -43,8 +43,6 @@ static inline void __monitorx(const void
+ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+-      x86_idle_clear_cpu_buffers();
+-
+       /* "mwait %eax, %ecx;" */
+       asm volatile(".byte 0x0f, 0x01, 0xc9;"
+                    :: "a" (eax), "c" (ecx));
+@@ -88,7 +86,6 @@ static inline void __mwaitx(unsigned lon
+ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+ {
+-      x86_idle_clear_cpu_buffers();
+       /* "mwait %eax, %ecx;" */
+       asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+@@ -107,6 +104,11 @@ static inline void __sti_mwait(unsigned
+  */
+ static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+ {
++      if (need_resched())
++              return;
++
++      x86_idle_clear_cpu_buffers();
++
+       if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
+               if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
+                       mb();
+@@ -115,9 +117,13 @@ static inline void mwait_idle_with_hints
+               }
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+-              if (!need_resched())
+-                      __mwait(eax, ecx);
++              if (need_resched())
++                      goto out;
++
++              __mwait(eax, ecx);
+       }
++
++out:
+       current_clr_polling();
+ }
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -825,6 +825,11 @@ static int prefer_mwait_c1_over_halt(con
+  */
+ static __cpuidle void mwait_idle(void)
+ {
++      if (need_resched())
++              return;
++
++      x86_idle_clear_cpu_buffers();
++
+       if (!current_set_polling_and_test()) {
+               if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
+                       mb(); /* quirk */
+@@ -833,13 +838,17 @@ static __cpuidle void mwait_idle(void)
+               }
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+-              if (!need_resched())
+-                      __sti_mwait(0, 0);
+-              else
++              if (need_resched()) {
+                       raw_local_irq_enable();
++                      goto out;
++              }
++
++              __sti_mwait(0, 0);
+       } else {
+               raw_local_irq_enable();
+       }
++
++out:
+       __current_clr_polling();
+ }
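Condensing the two hunks above: the change is purely about ordering. VERW now runs before MONITOR, so the microcode-assisted clear can no longer disarm the armed monitor, and MWAIT can actually enter C1. An illustrative condensed flow — not the literal patched function; the polling-flag and quirk handling are trimmed:

	static inline void mwait_idle_sketch(unsigned long eax, unsigned long ecx)
	{
		if (need_resched())
			return;

		x86_idle_clear_cpu_buffers();	/* VERW first, before the monitor is armed */

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__mwait(eax, ecx);	/* monitor still armed, C1 entry works */
	}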