git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/vmscape: Enable the mitigation
authorPawan Gupta <pawan.kumar.gupta@linux.intel.com>
Thu, 14 Aug 2025 17:20:42 +0000 (10:20 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Sep 2025 15:21:46 +0000 (17:21 +0200)
Commit 556c1ad666ad90c50ec8fccb930dd5046cfbecfb upstream.

Enable the previously added mitigation for VMscape. Add the cmdline
vmscape={off|ibpb|force} and sysfs reporting.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
arch/x86/Kconfig
arch/x86/kernel/cpu/bugs.c
drivers/base/cpu.c
include/linux/cpu.h

index 53755b2021ed012a98ff3c59533e7520b562947b..28f062dc25e1f4218fe068331a36b2513ca92eec 100644 (file)
@@ -525,6 +525,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/srbds
                /sys/devices/system/cpu/vulnerabilities/tsa
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+               /sys/devices/system/cpu/vulnerabilities/vmscape
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
index f402bbaccc8aa3184d74e027a3047b8f8b608846..8724c2c580b887585525199e3603df3ff49fac70 100644 (file)
                                               srbds=off [X86,INTEL]
                                               ssbd=force-off [ARM64]
                                               tsx_async_abort=off [X86]
+                                              vmscape=off [X86]
 
                                Exceptions:
                                               This does not have any effect on
        vmpoff=         [KNL,S390] Perform z/VM CP command after power off.
                        Format: <command>
 
+       vmscape=        [X86] Controls mitigation for VMscape attacks.
+                       VMscape attacks can leak information from a userspace
+                       hypervisor to a guest via speculative side-channels.
+
+                       off             - disable the mitigation
+                       ibpb            - use Indirect Branch Prediction Barrier
+                                         (IBPB) mitigation (default)
+                       force           - force vulnerability detection even on
+                                         unaffected processors
+
        vsyscall=       [X86-64,EARLY]
                        Controls the behavior of vsyscalls (i.e. calls to
                        fixed addresses of 0xffffffffff600x00 from legacy
index 2df0ae2a5e5d0ec79c2f44e06f4d4d4dcf5bdf6a..df14d0e67ea0cf72925496f47c32c2d273eadbef 100644 (file)
@@ -2769,6 +2769,15 @@ config MITIGATION_TSA
          security vulnerability on AMD CPUs which can lead to forwarding of
          invalid info to subsequent instructions and thus can affect their
          timing and thereby cause a leakage.
+
+config MITIGATION_VMSCAPE
+       bool "Mitigate VMSCAPE"
+       depends on KVM
+       default y
+       help
+         Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
+         vulnerability on Intel and AMD CPUs that may allow a guest to do
+         Spectre v2 style attacks on userspace hypervisor.
 endif
 
 config ARCH_HAS_ADD_PAGES
index 27542dcc746d72a7be2827397eb12fdf9c079a9d..cc035afd25743d2dc260f575ed49f1c6967ea556 100644 (file)
@@ -51,6 +51,7 @@ static void __init srso_select_mitigation(void);
 static void __init gds_select_mitigation(void);
 static void __init its_select_mitigation(void);
 static void __init tsa_select_mitigation(void);
+static void __init vmscape_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -194,6 +195,7 @@ void __init cpu_select_mitigations(void)
        gds_select_mitigation();
        its_select_mitigation();
        tsa_select_mitigation();
+       vmscape_select_mitigation();
 }
 
 /*
@@ -2958,6 +2960,68 @@ out:
        pr_info("%s\n", srso_strings[srso_mitigation]);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "VMSCAPE: " fmt
+
+enum vmscape_mitigations {
+       VMSCAPE_MITIGATION_NONE,
+       VMSCAPE_MITIGATION_AUTO,
+       VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
+       VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+static const char * const vmscape_strings[] = {
+       [VMSCAPE_MITIGATION_NONE]               = "Vulnerable",
+       /* [VMSCAPE_MITIGATION_AUTO] */
+       [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]  = "Mitigation: IBPB before exit to userspace",
+       [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]     = "Mitigation: IBPB on VMEXIT",
+};
+
+static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
+       IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
+
+static int __init vmscape_parse_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off")) {
+               vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+       } else if (!strcmp(str, "ibpb")) {
+               vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+       } else if (!strcmp(str, "force")) {
+               setup_force_cpu_bug(X86_BUG_VMSCAPE);
+               vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
+       } else {
+               pr_err("Ignoring unknown vmscape=%s option.\n", str);
+       }
+
+       return 0;
+}
+early_param("vmscape", vmscape_parse_cmdline);
+
+static void __init vmscape_select_mitigation(void)
+{
+       if (cpu_mitigations_off() ||
+           !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
+           !boot_cpu_has(X86_FEATURE_IBPB)) {
+               vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+               return;
+       }
+
+       if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
+               vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+
+       if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
+           srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
+               vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
+
+       if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
+               setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+
+       pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
@@ -3204,6 +3268,11 @@ static ssize_t tsa_show_state(char *buf)
        return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
 }
 
+static ssize_t vmscape_show_state(char *buf)
+{
+       return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -3268,6 +3337,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_TSA:
                return tsa_show_state(buf);
 
+       case X86_BUG_VMSCAPE:
+               return vmscape_show_state(buf);
+
        default:
                break;
        }
@@ -3357,6 +3429,11 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *bu
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
 }
+
+ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
+}
 #endif
 
 void __warn_thunk(void)
index 02870e70ed59556c89a493d0c93de81bcccfc851..ee52b106a95534d6095fb5c4ebd03e6f4bc518e4 100644 (file)
@@ -601,6 +601,7 @@ CPU_SHOW_VULN_FALLBACK(gds);
 CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
 CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
 CPU_SHOW_VULN_FALLBACK(tsa);
+CPU_SHOW_VULN_FALLBACK(vmscape);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -618,6 +619,7 @@ static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
 static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
@@ -636,6 +638,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_reg_file_data_sampling.attr,
        &dev_attr_indirect_target_selection.attr,
        &dev_attr_tsa.attr,
+       &dev_attr_vmscape.attr,
        NULL
 };
 
index 4342b5694909520c45760febeb44fca40f6bda7c..e682c75a3bb025de1c32491bd6e2d846fff6ab86 100644 (file)
@@ -80,6 +80,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
 extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
                                                  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,