6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Sep 2025 15:07:08 +0000 (17:07 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Sep 2025 15:07:08 +0000 (17:07 +0200)
added patches:
documentation-hw-vuln-add-vmscape-documentation.patch
series
x86-bugs-move-cpu_bugs_smt_update-down.patch
x86-vmscape-add-conditional-ibpb-mitigation.patch
x86-vmscape-add-old-intel-cpus-to-affected-list.patch
x86-vmscape-enable-the-mitigation.patch
x86-vmscape-enumerate-vmscape-bug.patch
x86-vmscape-warn-when-stibp-is-disabled-with-smt.patch

queue-6.12/documentation-hw-vuln-add-vmscape-documentation.patch [new file with mode: 0644]
queue-6.12/series [new file with mode: 0644]
queue-6.12/x86-bugs-move-cpu_bugs_smt_update-down.patch [new file with mode: 0644]
queue-6.12/x86-vmscape-add-conditional-ibpb-mitigation.patch [new file with mode: 0644]
queue-6.12/x86-vmscape-add-old-intel-cpus-to-affected-list.patch [new file with mode: 0644]
queue-6.12/x86-vmscape-enable-the-mitigation.patch [new file with mode: 0644]
queue-6.12/x86-vmscape-enumerate-vmscape-bug.patch [new file with mode: 0644]
queue-6.12/x86-vmscape-warn-when-stibp-is-disabled-with-smt.patch [new file with mode: 0644]

diff --git a/queue-6.12/documentation-hw-vuln-add-vmscape-documentation.patch b/queue-6.12/documentation-hw-vuln-add-vmscape-documentation.patch
new file mode 100644 (file)
index 0000000..499d8cb
--- /dev/null
@@ -0,0 +1,146 @@
+From 1fdcfc92c52eb099c5167738c47afe3c6df3467a Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Aug 2025 10:20:42 -0700
+Subject: Documentation/hw-vuln: Add VMSCAPE documentation
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit 9969779d0803f5dcd4460ae7aca2bc3fd91bff12 upstream.
+
+VMSCAPE is a vulnerability that may allow a guest to influence the branch
+prediction in host userspace, particularly affecting hypervisors like QEMU.
+
+Add the documentation.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/index.rst   |    1 
+ Documentation/admin-guide/hw-vuln/vmscape.rst |  110 ++++++++++++++++++++++++++
+ 2 files changed, 111 insertions(+)
+ create mode 100644 Documentation/admin-guide/hw-vuln/vmscape.rst
+
+--- a/Documentation/admin-guide/hw-vuln/index.rst
++++ b/Documentation/admin-guide/hw-vuln/index.rst
+@@ -23,3 +23,4 @@ are configurable at compile, boot or run
+    gather_data_sampling
+    reg-file-data-sampling
+    indirect-target-selection
++   vmscape
+--- /dev/null
++++ b/Documentation/admin-guide/hw-vuln/vmscape.rst
+@@ -0,0 +1,110 @@
++.. SPDX-License-Identifier: GPL-2.0
++
++VMSCAPE
++=======
++
++VMSCAPE is a vulnerability that may allow a guest to influence the branch
++prediction in host userspace. It particularly affects hypervisors like QEMU.
++
++Even if the hypervisor itself holds no sensitive data like disk encryption
++keys, guest userspace may be able to attack the guest kernel using the
++hypervisor as a confused deputy.
++
++Affected processors
++-------------------
++
++The following CPU families are affected by VMSCAPE:
++
++**Intel processors:**
++  - Skylake generation (Parts without Enhanced-IBRS)
++  - Cascade Lake generation (Parts affected by ITS guest/host separation)
++  - Alder Lake and newer (Parts affected by BHI)
++
++Note that BHI-affected parts that use the BHB clearing software mitigation,
++e.g. Icelake, are not vulnerable to VMSCAPE.
++
++**AMD processors:**
++  - Zen series (families 0x17, 0x19, 0x1a)
++
++**Hygon processors:**
++  - Family 0x18
++
++Mitigation
++----------
++
++Conditional IBPB
++----------------
++
++The kernel tracks when a CPU has run a potentially malicious guest and issues
++an IBPB before the first exit to userspace after VM-exit. If userspace did not
++run between VM-exit and the next VM-entry, no IBPB is issued.
++
++Note that the existing userspace mitigations against Spectre-v2 are effective
++in protecting userspace processes from each other. They are insufficient to
++protect the userspace VMM from a malicious guest. This is because Spectre-v2
++mitigations are applied at context switch time, while the userspace VMM can
++run after a VM-exit without a context switch.
++
++Vulnerability enumeration and mitigation are not applied inside a guest. This is
++because nested hypervisors should already be deploying IBPB to isolate
++themselves from nested guests.
++
++SMT considerations
++------------------
++
++When Simultaneous Multi-Threading (SMT) is enabled, hypervisors can be
++vulnerable to cross-thread attacks. For complete protection against VMSCAPE
++attacks in SMT environments, STIBP should be enabled.
++
++The kernel will issue a warning if SMT is enabled without adequate STIBP
++protection. A warning is not issued when:
++
++- SMT is disabled
++- STIBP is enabled system-wide
++- Intel eIBRS is enabled (which implies STIBP protection)
++
++System information and options
++------------------------------
++
++The sysfs file showing VMSCAPE mitigation status is:
++
++  /sys/devices/system/cpu/vulnerabilities/vmscape
++
++The possible values in this file are:
++
++ * 'Not affected':
++
++   The processor is not vulnerable to VMSCAPE attacks.
++
++ * 'Vulnerable':
++
++   The processor is vulnerable and no mitigation has been applied.
++
++ * 'Mitigation: IBPB before exit to userspace':
++
++   Conditional IBPB mitigation is enabled. The kernel tracks when a CPU has
++   run a potentially malicious guest and issues an IBPB before the first
++   exit to userspace after VM-exit.
++
++ * 'Mitigation: IBPB on VMEXIT':
++
++   IBPB is issued on every VM-exit. This occurs when other mitigations like
++   RETBLEED or SRSO are already issuing IBPB on VM-exit.
++
++Mitigation control on the kernel command line
++----------------------------------------------
++
++The mitigation can be controlled via the ``vmscape=`` command line parameter:
++
++ * ``vmscape=off``:
++
++   Disable the VMSCAPE mitigation.
++
++ * ``vmscape=ibpb``:
++
++   Enable conditional IBPB mitigation (default when CONFIG_MITIGATION_VMSCAPE=y).
++
++ * ``vmscape=force``:
++
++   Force vulnerability detection and mitigation even on processors that are
++   not known to be affected.
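
A minimal user-space sketch (illustration only, not part of the queued patches)
that reads the sysfs interface documented above; the path and the possible
strings come straight from vmscape.rst:

/* Report the VMSCAPE mitigation state exposed by the kernel via sysfs. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path = "/sys/devices/system/cpu/vulnerabilities/vmscape";
        char state[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                /* Kernels without the VMSCAPE patches do not have this file. */
                perror(path);
                return 1;
        }
        if (!fgets(state, sizeof(state), f)) {
                fclose(f);
                return 1;
        }
        fclose(f);

        /*
         * Possible values: "Not affected", "Vulnerable",
         * "Mitigation: IBPB before exit to userspace",
         * "Mitigation: IBPB on VMEXIT".
         */
        printf("vmscape: %s", state);
        return strncmp(state, "Vulnerable", 10) == 0 ? 2 : 0;
}
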
diff --git a/queue-6.12/series b/queue-6.12/series
new file mode 100644 (file)
index 0000000..cf11a28
--- /dev/null
@@ -0,0 +1,7 @@
+documentation-hw-vuln-add-vmscape-documentation.patch
+x86-vmscape-enumerate-vmscape-bug.patch
+x86-vmscape-add-conditional-ibpb-mitigation.patch
+x86-vmscape-enable-the-mitigation.patch
+x86-bugs-move-cpu_bugs_smt_update-down.patch
+x86-vmscape-warn-when-stibp-is-disabled-with-smt.patch
+x86-vmscape-add-old-intel-cpus-to-affected-list.patch
diff --git a/queue-6.12/x86-bugs-move-cpu_bugs_smt_update-down.patch b/queue-6.12/x86-bugs-move-cpu_bugs_smt_update-down.patch
new file mode 100644 (file)
index 0000000..92584b0
--- /dev/null
@@ -0,0 +1,192 @@
+From ba9b280cd6aff0d0719dcd7e73a4d0f55643f9e1 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Aug 2025 10:20:43 -0700
+Subject: x86/bugs: Move cpu_bugs_smt_update() down
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit 6449f5baf9c78a7a442d64f4a61378a21c5db113 upstream.
+
+cpu_bugs_smt_update() uses global variables from different mitigations. For
+SMT updates it can't currently use vmscape_mitigation, which is defined after
+it.
+
+Since cpu_bugs_smt_update() depends on many other mitigations, move it
+after all mitigations are defined. With that, it can use vmscape_mitigation
+in a later patch.
+
+No functional change.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |  148 ++++++++++++++++++++++-----------------------
+ 1 file changed, 74 insertions(+), 74 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2202,80 +2202,6 @@ out:
+       pr_info("%s\n", tsa_strings[tsa_mitigation]);
+ }
+-void cpu_bugs_smt_update(void)
+-{
+-      mutex_lock(&spec_ctrl_mutex);
+-
+-      if (sched_smt_active() && unprivileged_ebpf_enabled() &&
+-          spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
+-              pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
+-
+-      switch (spectre_v2_user_stibp) {
+-      case SPECTRE_V2_USER_NONE:
+-              break;
+-      case SPECTRE_V2_USER_STRICT:
+-      case SPECTRE_V2_USER_STRICT_PREFERRED:
+-              update_stibp_strict();
+-              break;
+-      case SPECTRE_V2_USER_PRCTL:
+-      case SPECTRE_V2_USER_SECCOMP:
+-              update_indir_branch_cond();
+-              break;
+-      }
+-
+-      switch (mds_mitigation) {
+-      case MDS_MITIGATION_FULL:
+-      case MDS_MITIGATION_VMWERV:
+-              if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+-                      pr_warn_once(MDS_MSG_SMT);
+-              update_mds_branch_idle();
+-              break;
+-      case MDS_MITIGATION_OFF:
+-              break;
+-      }
+-
+-      switch (taa_mitigation) {
+-      case TAA_MITIGATION_VERW:
+-      case TAA_MITIGATION_UCODE_NEEDED:
+-              if (sched_smt_active())
+-                      pr_warn_once(TAA_MSG_SMT);
+-              break;
+-      case TAA_MITIGATION_TSX_DISABLED:
+-      case TAA_MITIGATION_OFF:
+-              break;
+-      }
+-
+-      switch (mmio_mitigation) {
+-      case MMIO_MITIGATION_VERW:
+-      case MMIO_MITIGATION_UCODE_NEEDED:
+-              if (sched_smt_active())
+-                      pr_warn_once(MMIO_MSG_SMT);
+-              break;
+-      case MMIO_MITIGATION_OFF:
+-              break;
+-      }
+-
+-      switch (tsa_mitigation) {
+-      case TSA_MITIGATION_USER_KERNEL:
+-      case TSA_MITIGATION_VM:
+-      case TSA_MITIGATION_FULL:
+-      case TSA_MITIGATION_UCODE_NEEDED:
+-              /*
+-               * TSA-SQ can potentially lead to info leakage between
+-               * SMT threads.
+-               */
+-              if (sched_smt_active())
+-                      static_branch_enable(&cpu_buf_idle_clear);
+-              else
+-                      static_branch_disable(&cpu_buf_idle_clear);
+-              break;
+-      case TSA_MITIGATION_NONE:
+-              break;
+-      }
+-
+-      mutex_unlock(&spec_ctrl_mutex);
+-}
+-
+ #undef pr_fmt
+ #define pr_fmt(fmt)   "Speculative Store Bypass: " fmt
+@@ -3025,6 +2951,80 @@ static void __init vmscape_select_mitiga
+ #undef pr_fmt
+ #define pr_fmt(fmt) fmt
++void cpu_bugs_smt_update(void)
++{
++      mutex_lock(&spec_ctrl_mutex);
++
++      if (sched_smt_active() && unprivileged_ebpf_enabled() &&
++          spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
++              pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
++
++      switch (spectre_v2_user_stibp) {
++      case SPECTRE_V2_USER_NONE:
++              break;
++      case SPECTRE_V2_USER_STRICT:
++      case SPECTRE_V2_USER_STRICT_PREFERRED:
++              update_stibp_strict();
++              break;
++      case SPECTRE_V2_USER_PRCTL:
++      case SPECTRE_V2_USER_SECCOMP:
++              update_indir_branch_cond();
++              break;
++      }
++
++      switch (mds_mitigation) {
++      case MDS_MITIGATION_FULL:
++      case MDS_MITIGATION_VMWERV:
++              if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
++                      pr_warn_once(MDS_MSG_SMT);
++              update_mds_branch_idle();
++              break;
++      case MDS_MITIGATION_OFF:
++              break;
++      }
++
++      switch (taa_mitigation) {
++      case TAA_MITIGATION_VERW:
++      case TAA_MITIGATION_UCODE_NEEDED:
++              if (sched_smt_active())
++                      pr_warn_once(TAA_MSG_SMT);
++              break;
++      case TAA_MITIGATION_TSX_DISABLED:
++      case TAA_MITIGATION_OFF:
++              break;
++      }
++
++      switch (mmio_mitigation) {
++      case MMIO_MITIGATION_VERW:
++      case MMIO_MITIGATION_UCODE_NEEDED:
++              if (sched_smt_active())
++                      pr_warn_once(MMIO_MSG_SMT);
++              break;
++      case MMIO_MITIGATION_OFF:
++              break;
++      }
++
++      switch (tsa_mitigation) {
++      case TSA_MITIGATION_USER_KERNEL:
++      case TSA_MITIGATION_VM:
++      case TSA_MITIGATION_FULL:
++      case TSA_MITIGATION_UCODE_NEEDED:
++              /*
++               * TSA-SQ can potentially lead to info leakage between
++               * SMT threads.
++               */
++              if (sched_smt_active())
++                      static_branch_enable(&cpu_buf_idle_clear);
++              else
++                      static_branch_disable(&cpu_buf_idle_clear);
++              break;
++      case TSA_MITIGATION_NONE:
++              break;
++      }
++
++      mutex_unlock(&spec_ctrl_mutex);
++}
++
+ #ifdef CONFIG_SYSFS
+ #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
diff --git a/queue-6.12/x86-vmscape-add-conditional-ibpb-mitigation.patch b/queue-6.12/x86-vmscape-add-conditional-ibpb-mitigation.patch
new file mode 100644 (file)
index 0000000..c0cb95a
--- /dev/null
@@ -0,0 +1,118 @@
+From 267a0665bc29428b64274030e2b357174f43b8dd Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Aug 2025 10:20:42 -0700
+Subject: x86/vmscape: Add conditional IBPB mitigation
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit 2f8f173413f1cbf52660d04df92d0069c4306d25 upstream.
+
+VMSCAPE is a vulnerability that exploits insufficient branch predictor
+isolation between a guest and a userspace hypervisor (like QEMU). Existing
+mitigations already protect kernel/KVM from a malicious guest. Userspace
+can additionally be protected by flushing the branch predictors after a
+VMexit.
+
+Since it is the userspace that consumes the poisoned branch predictors,
+conditionally issue an IBPB after a VMexit and before returning to
+userspace. Workloads that frequently switch between hypervisor and
+userspace will incur the most overhead from the new IBPB.
+
+This new IBPB is not integrated with the existing IBPB sites. For
+instance, a task can use the existing speculation control prctl() to
+get an IBPB at context switch time. With this implementation, the
+IBPB is doubled up: one at context switch and another before running
+userspace.
+
+The intent is to integrate and optimize these cases post-embargo.
+
+[ dhansen: elaborate on suboptimal IBPB solution ]
+
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h   |    1 +
+ arch/x86/include/asm/entry-common.h  |    7 +++++++
+ arch/x86/include/asm/nospec-branch.h |    2 ++
+ arch/x86/kernel/cpu/bugs.c           |    8 ++++++++
+ arch/x86/kvm/x86.c                   |    9 +++++++++
+ 5 files changed, 27 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -482,6 +482,7 @@
+ #define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
+ #define X86_FEATURE_TSA_L1_NO          (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
+ #define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
++#define X86_FEATURE_IBPB_EXIT_TO_USER  (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
+ /*
+  * BUG word(s)
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -92,6 +92,13 @@ static inline void arch_exit_to_user_mod
+        * 8 (ia32) bits.
+        */
+       choose_random_kstack_offset(rdtsc());
++
++      /* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
++      if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
++          this_cpu_read(x86_ibpb_exit_to_user)) {
++              indirect_branch_prediction_barrier();
++              this_cpu_write(x86_ibpb_exit_to_user, false);
++      }
+ }
+ #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -549,6 +549,8 @@ void alternative_msr_write(unsigned int
+ extern u64 x86_pred_cmd;
++DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);
++
+ static inline void indirect_branch_prediction_barrier(void)
+ {
+       alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -60,6 +60,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+ DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
+ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
++/*
++ * Set when the CPU has run a potentially malicious guest. An IBPB will
++ * be needed before running userspace. That IBPB will flush the branch
++ * predictor content.
++ */
++DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
++EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
++
+ u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
+ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -11070,6 +11070,15 @@ static int vcpu_enter_guest(struct kvm_v
+               wrmsrl(MSR_IA32_XFD_ERR, 0);
+       /*
++       * Mark this CPU as needing a branch predictor flush before running
++       * userspace. Must be done before enabling preemption to ensure it gets
++       * set for the CPU that actually ran the guest, and not the CPU that it
++       * may migrate to.
++       */
++      if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
++              this_cpu_write(x86_ibpb_exit_to_user, true);
++
++      /*
+        * Consume any pending interrupts, including the possible source of
+        * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
+        * An instruction is required after local_irq_enable() to fully unblock
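
To make the ordering in the hunks above easier to follow, here is a simplified
single-CPU model (illustration only; vm_exit() and exit_to_user() are
hypothetical stand-ins, not kernel APIs) of the conditional IBPB: the flag is
set at VM-exit and consumed, with a single barrier, on the first exit to
userspace, so repeated VM-entries without intervening userspace pay nothing
extra:

#include <stdbool.h>
#include <stdio.h>

static bool ibpb_pending;       /* models per-CPU x86_ibpb_exit_to_user */

static void issue_ibpb(void)
{
        /* stands in for indirect_branch_prediction_barrier() */
        puts("IBPB");
}

static void vm_exit(void)
{
        ibpb_pending = true;    /* CPU may hold guest-poisoned predictions */
}

static void exit_to_user(void)
{
        if (ibpb_pending) {
                issue_ibpb();
                ibpb_pending = false;
        }
}

int main(void)
{
        vm_exit();
        vm_exit();              /* no userspace in between: still one IBPB */
        exit_to_user();         /* prints "IBPB" once */
        exit_to_user();         /* nothing pending, no barrier issued */
        return 0;
}
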
diff --git a/queue-6.12/x86-vmscape-add-old-intel-cpus-to-affected-list.patch b/queue-6.12/x86-vmscape-add-old-intel-cpus-to-affected-list.patch
new file mode 100644 (file)
index 0000000..89e5da9
--- /dev/null
@@ -0,0 +1,49 @@
+From fe2ac6a108810a598c4495dd338d787bb4244c2b Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Fri, 29 Aug 2025 15:28:52 -0700
+Subject: x86/vmscape: Add old Intel CPUs to affected list
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit 8a68d64bb10334426834e8c273319601878e961e upstream.
+
+These old CPUs are not tested against VMSCAPE, but are likely vulnerable.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/common.c |   21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1239,15 +1239,18 @@ static const __initconst struct x86_cpu_
+ #define VMSCAPE               BIT(11)
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+-      VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,         X86_STEPPING_ANY,               SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL,           X86_STEPPING_ANY,               SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L,         X86_STEPPING_ANY,               SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G,         X86_STEPPING_ANY,               SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X,         X86_STEPPING_ANY,               MMIO),
+-      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D,       X86_STEPPING_ANY,               MMIO),
+-      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,       X86_STEPPING_ANY,               SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,       X86_STEPPING_ANY,               MMIO),
+-      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,         X86_STEPPING_ANY,               SRBDS),
++      VULNBL_INTEL_STEPPINGS(INTEL_SANDYBRIDGE_X,     X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_SANDYBRIDGE,       X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE_X,       X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,         X86_STEPPING_ANY,               SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL,           X86_STEPPING_ANY,               SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L,         X86_STEPPING_ANY,               SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G,         X86_STEPPING_ANY,               SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X,         X86_STEPPING_ANY,               MMIO | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D,       X86_STEPPING_ANY,               MMIO | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,       X86_STEPPING_ANY,               MMIO | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,       X86_STEPPING_ANY,               SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,         X86_STEPPING_ANY,               SRBDS | VMSCAPE),
+       VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,         X86_STEPPINGS(0x0, 0x5),        MMIO | RETBLEED | GDS | VMSCAPE),
+       VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,         X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,         X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
diff --git a/queue-6.12/x86-vmscape-enable-the-mitigation.patch b/queue-6.12/x86-vmscape-enable-the-mitigation.patch
new file mode 100644 (file)
index 0000000..f09448a
--- /dev/null
@@ -0,0 +1,239 @@
+From 5835163129303d4bb4707fd16271825049ce34e9 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Aug 2025 10:20:42 -0700
+Subject: x86/vmscape: Enable the mitigation
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit 556c1ad666ad90c50ec8fccb930dd5046cfbecfb upstream.
+
+Enable the previously added mitigation for VMscape. Add the cmdline
+vmscape={off|ibpb|force} and sysfs reporting.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu |    1 
+ Documentation/admin-guide/kernel-parameters.txt    |   11 +++
+ arch/x86/Kconfig                                   |    9 ++
+ arch/x86/kernel/cpu/bugs.c                         |   77 +++++++++++++++++++++
+ drivers/base/cpu.c                                 |    3 
+ include/linux/cpu.h                                |    1 
+ 6 files changed, 102 insertions(+)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -525,6 +525,7 @@ What:              /sys/devices/system/cpu/vulnerabi
+               /sys/devices/system/cpu/vulnerabilities/srbds
+               /sys/devices/system/cpu/vulnerabilities/tsa
+               /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
++              /sys/devices/system/cpu/vulnerabilities/vmscape
+ Date:         January 2018
+ Contact:      Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description:  Information about CPU vulnerabilities
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -3548,6 +3548,7 @@
+                                              srbds=off [X86,INTEL]
+                                              ssbd=force-off [ARM64]
+                                              tsx_async_abort=off [X86]
++                                             vmscape=off [X86]
+                               Exceptions:
+                                              This does not have any effect on
+@@ -7425,6 +7426,16 @@
+       vmpoff=         [KNL,S390] Perform z/VM CP command after power off.
+                       Format: <command>
++      vmscape=        [X86] Controls mitigation for VMscape attacks.
++                      VMscape attacks can leak information from a userspace
++                      hypervisor to a guest via speculative side-channels.
++
++                      off             - disable the mitigation
++                      ibpb            - use Indirect Branch Prediction Barrier
++                                        (IBPB) mitigation (default)
++                      force           - force vulnerability detection even on
++                                        unaffected processors
++
+       vsyscall=       [X86-64,EARLY]
+                       Controls the behavior of vsyscalls (i.e. calls to
+                       fixed addresses of 0xffffffffff600x00 from legacy
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2769,6 +2769,15 @@ config MITIGATION_TSA
+         security vulnerability on AMD CPUs which can lead to forwarding of
+         invalid info to subsequent instructions and thus can affect their
+         timing and thereby cause a leakage.
++
++config MITIGATION_VMSCAPE
++      bool "Mitigate VMSCAPE"
++      depends on KVM
++      default y
++      help
++        Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
++        vulnerability on Intel and AMD CPUs that may allow a guest to do
++        Spectre v2 style attacks on the userspace hypervisor.
+ endif
+ config ARCH_HAS_ADD_PAGES
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -51,6 +51,7 @@ static void __init srso_select_mitigatio
+ static void __init gds_select_mitigation(void);
+ static void __init its_select_mitigation(void);
+ static void __init tsa_select_mitigation(void);
++static void __init vmscape_select_mitigation(void);
+ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
+ u64 x86_spec_ctrl_base;
+@@ -194,6 +195,7 @@ void __init cpu_select_mitigations(void)
+       gds_select_mitigation();
+       its_select_mitigation();
+       tsa_select_mitigation();
++      vmscape_select_mitigation();
+ }
+ /*
+@@ -2959,6 +2961,68 @@ out:
+ }
+ #undef pr_fmt
++#define pr_fmt(fmt)   "VMSCAPE: " fmt
++
++enum vmscape_mitigations {
++      VMSCAPE_MITIGATION_NONE,
++      VMSCAPE_MITIGATION_AUTO,
++      VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
++      VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
++};
++
++static const char * const vmscape_strings[] = {
++      [VMSCAPE_MITIGATION_NONE]               = "Vulnerable",
++      /* [VMSCAPE_MITIGATION_AUTO] */
++      [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]  = "Mitigation: IBPB before exit to userspace",
++      [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]     = "Mitigation: IBPB on VMEXIT",
++};
++
++static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
++      IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
++
++static int __init vmscape_parse_cmdline(char *str)
++{
++      if (!str)
++              return -EINVAL;
++
++      if (!strcmp(str, "off")) {
++              vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
++      } else if (!strcmp(str, "ibpb")) {
++              vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
++      } else if (!strcmp(str, "force")) {
++              setup_force_cpu_bug(X86_BUG_VMSCAPE);
++              vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
++      } else {
++              pr_err("Ignoring unknown vmscape=%s option.\n", str);
++      }
++
++      return 0;
++}
++early_param("vmscape", vmscape_parse_cmdline);
++
++static void __init vmscape_select_mitigation(void)
++{
++      if (cpu_mitigations_off() ||
++          !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
++          !boot_cpu_has(X86_FEATURE_IBPB)) {
++              vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
++              return;
++      }
++
++      if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
++              vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
++
++      if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
++          srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
++              vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
++
++      if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
++              setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
++
++      pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
++}
++
++#undef pr_fmt
+ #define pr_fmt(fmt) fmt
+ #ifdef CONFIG_SYSFS
+@@ -3204,6 +3268,11 @@ static ssize_t tsa_show_state(char *buf)
+       return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
+ }
++static ssize_t vmscape_show_state(char *buf)
++{
++      return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
++}
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+                              char *buf, unsigned int bug)
+ {
+@@ -3268,6 +3337,9 @@ static ssize_t cpu_show_common(struct de
+       case X86_BUG_TSA:
+               return tsa_show_state(buf);
++      case X86_BUG_VMSCAPE:
++              return vmscape_show_state(buf);
++
+       default:
+               break;
+       }
+@@ -3357,6 +3429,11 @@ ssize_t cpu_show_tsa(struct device *dev,
+ {
+       return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
+ }
++
++ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
++{
++      return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
++}
+ #endif
+ void __warn_thunk(void)
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -601,6 +601,7 @@ CPU_SHOW_VULN_FALLBACK(gds);
+ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+ CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+ CPU_SHOW_VULN_FALLBACK(tsa);
++CPU_SHOW_VULN_FALLBACK(vmscape);
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+@@ -618,6 +619,7 @@ static DEVICE_ATTR(gather_data_sampling,
+ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+ static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+ static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
++static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+       &dev_attr_meltdown.attr,
+@@ -636,6 +638,7 @@ static struct attribute *cpu_root_vulner
+       &dev_attr_reg_file_data_sampling.attr,
+       &dev_attr_indirect_target_selection.attr,
+       &dev_attr_tsa.attr,
++      &dev_attr_vmscape.attr,
+       NULL
+ };
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -80,6 +80,7 @@ extern ssize_t cpu_show_reg_file_data_sa
+ extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+                                                 struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
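
The selection logic added to bugs.c above can be summarized with a small
stand-alone model (illustration only; the helper name and parameters are made
up): the mitigation is dropped when mitigations are globally off, the CPU is
unaffected, or IBPB is unavailable; AUTO resolves to the conditional IBPB; and
an existing IBPB-on-VMEXIT from retbleed/SRSO takes precedence:

#include <stdbool.h>
#include <stdio.h>

enum vmscape_mitigations { NONE, AUTO, IBPB_EXIT_TO_USER, IBPB_ON_VMEXIT };

static const char * const vmscape_strings[] = {
        [NONE]              = "Vulnerable",
        [AUTO]              = "Vulnerable", /* AUTO is resolved before reporting */
        [IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
        [IBPB_ON_VMEXIT]    = "Mitigation: IBPB on VMEXIT",
};

/* Mirror of the precedence in vmscape_select_mitigation(), nothing more. */
static enum vmscape_mitigations pick_mitigation(bool affected, bool has_ibpb,
                                                bool ibpb_already_on_vmexit,
                                                enum vmscape_mitigations cmdline)
{
        enum vmscape_mitigations m = cmdline;

        if (!affected || !has_ibpb)
                return NONE;
        if (m == AUTO)
                m = IBPB_EXIT_TO_USER;          /* default */
        if (ibpb_already_on_vmexit)
                m = IBPB_ON_VMEXIT;             /* retbleed/SRSO already cover it */
        return m;
}

int main(void)
{
        puts(vmscape_strings[pick_mitigation(true, true, false, AUTO)]);
        puts(vmscape_strings[pick_mitigation(true, true, true,  AUTO)]);
        puts(vmscape_strings[pick_mitigation(true, true, false, NONE)]); /* vmscape=off */
        return 0;
}
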
diff --git a/queue-6.12/x86-vmscape-enumerate-vmscape-bug.patch b/queue-6.12/x86-vmscape-enumerate-vmscape-bug.patch
new file mode 100644 (file)
index 0000000..4624129
--- /dev/null
@@ -0,0 +1,146 @@
+From 344da43bba2d6d16c24ae1dae84cd06a5e5847b8 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Aug 2025 10:20:42 -0700
+Subject: x86/vmscape: Enumerate VMSCAPE bug
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit a508cec6e5215a3fbc7e73ae86a5c5602187934d upstream.
+
+The VMSCAPE vulnerability may allow a guest to cause Branch Target
+Injection (BTI) in userspace hypervisors.
+
+Kernels (both host and guest) have existing defenses against direct BTI
+attacks from guests. There are also inter-process BTI mitigations which
+prevent processes from attacking each other. However, the threat in this
+case is to a userspace hypervisor within the same process as the attacker.
+
+Userspace hypervisors have access to their own sensitive data like disk
+encryption keys and also typically have access to all guest data. This
+means guest userspace may use the hypervisor as a confused deputy to attack
+sensitive guest kernel data. There are no existing mitigations for these
+attacks.
+
+Introduce X86_BUG_VMSCAPE for this vulnerability and set it on affected
+Intel and AMD CPUs.
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h |    1 
+ arch/x86/kernel/cpu/common.c       |   63 ++++++++++++++++++++++++-------------
+ 2 files changed, 43 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -536,4 +536,5 @@
+ #define X86_BUG_ITS                   X86_BUG(1*32 + 5) /* "its" CPU is affected by Indirect Target Selection */
+ #define X86_BUG_ITS_NATIVE_ONLY               X86_BUG(1*32 + 6) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
+ #define X86_BUG_TSA                   X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
++#define X86_BUG_VMSCAPE                       X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1235,6 +1235,8 @@ static const __initconst struct x86_cpu_
+ #define ITS_NATIVE_ONLY       BIT(9)
+ /* CPU is affected by Transient Scheduler Attacks */
+ #define TSA           BIT(10)
++/* CPU is affected by VMSCAPE */
++#define VMSCAPE               BIT(11)
+ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
+       VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,         X86_STEPPING_ANY,               SRBDS),
+@@ -1246,43 +1248,54 @@ static const struct x86_cpu_id cpu_vuln_
+       VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,       X86_STEPPING_ANY,               SRBDS),
+       VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,       X86_STEPPING_ANY,               MMIO),
+       VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,         X86_STEPPING_ANY,               SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,         X86_STEPPINGS(0x0, 0x5),        MMIO | RETBLEED | GDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,         X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | ITS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,         X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,           X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,        X86_STEPPINGS(0x0, 0xb),        MMIO | RETBLEED | GDS | SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,        X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | ITS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,          X86_STEPPINGS(0x0, 0xc),        MMIO | RETBLEED | GDS | SRBDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,          X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | ITS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,      X86_STEPPING_ANY,               RETBLEED),
++      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,         X86_STEPPINGS(0x0, 0x5),        MMIO | RETBLEED | GDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,         X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | ITS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,         X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,           X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,        X86_STEPPINGS(0x0, 0xb),        MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,        X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,          X86_STEPPINGS(0x0, 0xc),        MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,          X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,      X86_STEPPING_ANY,               RETBLEED | VMSCAPE),
+       VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L,         X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+       VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D,         X86_STEPPING_ANY,               MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+       VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X,         X86_STEPPING_ANY,               MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+-      VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,         X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,       X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED | ITS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
++      VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,         X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,       X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED | ITS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,       X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L,       X86_STEPPING_ANY,               GDS | ITS | ITS_NATIVE_ONLY),
+       VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE,         X86_STEPPING_ANY,               GDS | ITS | ITS_NATIVE_ONLY),
+       VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD,         X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE,        X86_STEPPING_ANY,               MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+-      VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,         X86_STEPPING_ANY,               RFDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,       X86_STEPPING_ANY,               RFDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,        X86_STEPPING_ANY,               RFDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P,      X86_STEPPING_ANY,               RFDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S,      X86_STEPPING_ANY,               RFDS),
+-      VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT,    X86_STEPPING_ANY,               RFDS),
++      VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,         X86_STEPPING_ANY,               RFDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,       X86_STEPPING_ANY,               RFDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,        X86_STEPPING_ANY,               RFDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P,      X86_STEPPING_ANY,               RFDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S,      X86_STEPPING_ANY,               RFDS | VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_METEORLAKE_L,      X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_ARROWLAKE_H,       X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_ARROWLAKE,         X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_ARROWLAKE_U,       X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_LUNARLAKE_M,       X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_SAPPHIRERAPIDS_X,  X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_GRANITERAPIDS_X,   X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_EMERALDRAPIDS_X,   X86_STEPPING_ANY,               VMSCAPE),
++      VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT,    X86_STEPPING_ANY,               RFDS | VMSCAPE),
+       VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT,      X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RFDS),
+       VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D,    X86_STEPPING_ANY,               MMIO | RFDS),
+       VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L,    X86_STEPPING_ANY,               MMIO | MMIO_SBDS | RFDS),
+       VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT,     X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D,   X86_STEPPING_ANY,               RFDS),
+       VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,              RFDS),
++      VULNBL_INTEL_STEPPINGS(INTEL_ATOM_CRESTMONT_X,  X86_STEPPING_ANY,               VMSCAPE),
+       VULNBL_AMD(0x15, RETBLEED),
+       VULNBL_AMD(0x16, RETBLEED),
+-      VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
+-      VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
+-      VULNBL_AMD(0x19, SRSO | TSA),
++      VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
++      VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
++      VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
++      VULNBL_AMD(0x1a, SRSO | VMSCAPE),
++
+       {}
+ };
+@@ -1502,6 +1515,14 @@ static void __init cpu_set_bug_bits(stru
+               }
+       }
++      /*
++       * Set the bug only on bare-metal. A nested hypervisor should already be
++       * deploying IBPB to isolate itself from nested guests.
++       */
++      if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
++          !boot_cpu_has(X86_FEATURE_HYPERVISOR))
++              setup_force_cpu_bug(X86_BUG_VMSCAPE);
++
+       if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+               return;
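
The bare-metal check in the last hunk relies on the CPUID hypervisor-present
bit; a short user-space sketch (illustration only, not from the patch) of the
same test, CPUID leaf 1, ECX bit 31, using the compiler's cpuid.h helper:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        /* ECX bit 31 set means a hypervisor is present (X86_FEATURE_HYPERVISOR). */
        if (ecx & (1u << 31))
                puts("running under a hypervisor: VMSCAPE bug bit not forced");
        else
                puts("bare metal: VMSCAPE bug bit forced on affected CPUs");
        return 0;
}
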
diff --git a/queue-6.12/x86-vmscape-warn-when-stibp-is-disabled-with-smt.patch b/queue-6.12/x86-vmscape-warn-when-stibp-is-disabled-with-smt.patch
new file mode 100644 (file)
index 0000000..2f03d1b
--- /dev/null
@@ -0,0 +1,70 @@
+From 0f57e886bde4f0303c3410935ea0a66ce6d66029 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Thu, 14 Aug 2025 10:20:43 -0700
+Subject: x86/vmscape: Warn when STIBP is disabled with SMT
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+Commit b7cc9887231526ca4fa89f3fa4119e47c2dc7b1e upstream.
+
+Cross-thread attacks are generally harder as they require the victim to be
+co-located on a core. However, with VMSCAPE the adversary and its targets
+belong to the same guest execution and are therefore more likely to get
+co-located. In particular, a thread that is currently executing the userspace
+hypervisor (after the IBPB) may still be targeted by guest execution from a
+sibling thread.
+
+Issue a warning about the potential risk, except when:
+
+- SMT is disabled
+- STIBP is enabled system-wide
+- Intel eIBRS is enabled (which implies STIBP protection)
+
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |   24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2951,6 +2951,8 @@ static void __init vmscape_select_mitiga
+ #undef pr_fmt
+ #define pr_fmt(fmt) fmt
++#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
++
+ void cpu_bugs_smt_update(void)
+ {
+       mutex_lock(&spec_ctrl_mutex);
+@@ -3022,6 +3024,28 @@ void cpu_bugs_smt_update(void)
+               break;
+       }
++      switch (vmscape_mitigation) {
++      case VMSCAPE_MITIGATION_NONE:
++      case VMSCAPE_MITIGATION_AUTO:
++              break;
++      case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
++      case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
++              /*
++               * Hypervisors can be attacked across threads; warn for SMT when
++               * STIBP is not already enabled system-wide.
++               *
++               * Intel eIBRS (!AUTOIBRS) implies STIBP on.
++               */
++              if (!sched_smt_active() ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
++                  spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
++                  (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
++                   !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
++                      break;
++              pr_warn_once(VMSCAPE_MSG_SMT);
++              break;
++      }
++
+       mutex_unlock(&spec_ctrl_mutex);
+ }
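
For clarity, the warning condition added above reduces to a small boolean model
(illustration only; function and parameter names are made up): warn only when a
VMSCAPE mitigation is in place, SMT is active, STIBP is not enabled system-wide,
and legacy eIBRS (which implies STIBP) is not in use:

#include <stdbool.h>
#include <stdio.h>

static bool should_warn(bool mitigated, bool smt_active, bool stibp_strict,
                        bool eibrs, bool autoibrs)
{
        if (!mitigated || !smt_active)
                return false;
        if (stibp_strict)
                return false;           /* STIBP already enabled system-wide */
        if (eibrs && !autoibrs)
                return false;           /* Intel eIBRS implies STIBP */
        return true;
}

int main(void)
{
        printf("%d\n", should_warn(true, true, false, false, false));  /* 1: warn */
        printf("%d\n", should_warn(true, true, true,  false, false));  /* 0: STIBP strict */
        printf("%d\n", should_warn(true, false, false, false, false)); /* 0: SMT off */
        return 0;
}
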