git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/vmscape: Enable the mitigation
author: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Thu, 14 Aug 2025 17:20:42 +0000 (10:20 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Sep 2025 15:16:06 +0000 (17:16 +0200)
commit 556c1ad666ad90c50ec8fccb930dd5046cfbecfb upstream.

Enable the previously added mitigation for VMscape. Add the cmdline
vmscape={off|ibpb|force} and sysfs reporting.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Amit Shah <amit.shah@amd.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
arch/x86/Kconfig
arch/x86/kernel/cpu/bugs.c
drivers/base/cpu.c
include/linux/cpu.h

index bf2b83e9c07d1f16884268d42c7272825d8778b3..d842fe4f8a6198450c8fb58ae4743e3f13815878 100644 (file)
@@ -516,6 +516,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/srbds
                /sys/devices/system/cpu/vulnerabilities/tsa
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+               /sys/devices/system/cpu/vulnerabilities/vmscape
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Information about CPU vulnerabilities
index bbe6c23a5778603cfc321ec52443d35d952ce1c7..bac4b1493222ac3fa1224117d38856932a6c3421 100644 (file)
                                               ssbd=force-off [ARM64]
                                               nospectre_bhb [ARM64]
                                               tsx_async_abort=off [X86]
+                                              vmscape=off [X86]
 
                                Exceptions:
                                               This does not have any effect on
        vmpoff=         [KNL,S390] Perform z/VM CP command after power off.
                        Format: <command>
 
+       vmscape=        [X86] Controls mitigation for VMscape attacks.
+                       VMscape attacks can leak information from a userspace
+                       hypervisor to a guest via speculative side-channels.
+
+                       off             - disable the mitigation
+                       ibpb            - use Indirect Branch Prediction Barrier
+                                         (IBPB) mitigation (default)
+                       force           - force vulnerability detection even on
+                                         unaffected processors
+
        vsyscall=       [X86-64]
                        Controls the behavior of vsyscalls (i.e. calls to
                        fixed addresses of 0xffffffffff600x00 from legacy
index 77efb16cbc2424ba6be2b0664db6f2d7fdd616c2..43e5bc827f48866717624ed3958bf62de956723c 100644 (file)
@@ -2541,6 +2541,15 @@ config MITIGATION_TSA
          security vulnerability on AMD CPUs which can lead to forwarding of
          invalid info to subsequent instructions and thus can affect their
          timing and thereby cause a leakage.
+
+config MITIGATION_VMSCAPE
+       bool "Mitigate VMSCAPE"
+       depends on KVM
+       default y
+       help
+         Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
+         vulnerability on Intel and AMD CPUs that may allow a guest to do
+         Spectre v2 style attacks on userspace hypervisor.
 endif
 
 config ARCH_HAS_ADD_PAGES
index 423e7c67e20be45bee626e1372464ade75e2e973..a70af8c62939b7d2c7b46794fc01f13cf1f8bb4e 100644 (file)
@@ -49,6 +49,7 @@ static void __init gds_select_mitigation(void);
 static void __init srso_select_mitigation(void);
 static void __init its_select_mitigation(void);
 static void __init tsa_select_mitigation(void);
+static void __init vmscape_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -180,6 +181,7 @@ void __init cpu_select_mitigations(void)
        gds_select_mitigation();
        its_select_mitigation();
        tsa_select_mitigation();
+       vmscape_select_mitigation();
 }
 
 /*
@@ -2759,6 +2761,68 @@ pred_cmd:
                x86_pred_cmd = PRED_CMD_SBPB;
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "VMSCAPE: " fmt
+
+enum vmscape_mitigations {
+       VMSCAPE_MITIGATION_NONE,
+       VMSCAPE_MITIGATION_AUTO,
+       VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
+       VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
+};
+
+static const char * const vmscape_strings[] = {
+       [VMSCAPE_MITIGATION_NONE]               = "Vulnerable",
+       /* [VMSCAPE_MITIGATION_AUTO] */
+       [VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]  = "Mitigation: IBPB before exit to userspace",
+       [VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]     = "Mitigation: IBPB on VMEXIT",
+};
+
+static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
+       IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;
+
+static int __init vmscape_parse_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off")) {
+               vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+       } else if (!strcmp(str, "ibpb")) {
+               vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+       } else if (!strcmp(str, "force")) {
+               setup_force_cpu_bug(X86_BUG_VMSCAPE);
+               vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
+       } else {
+               pr_err("Ignoring unknown vmscape=%s option.\n", str);
+       }
+
+       return 0;
+}
+early_param("vmscape", vmscape_parse_cmdline);
+
+static void __init vmscape_select_mitigation(void)
+{
+       if (cpu_mitigations_off() ||
+           !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
+           !boot_cpu_has(X86_FEATURE_IBPB)) {
+               vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
+               return;
+       }
+
+       if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
+               vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
+
+       if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
+           srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
+               vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;
+
+       if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
+               setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
+
+       pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt) fmt
 
@@ -2987,6 +3051,11 @@ static ssize_t tsa_show_state(char *buf)
        return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
 }
 
+static ssize_t vmscape_show_state(char *buf)
+{
+       return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -3051,6 +3120,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_TSA:
                return tsa_show_state(buf);
 
+       case X86_BUG_VMSCAPE:
+               return vmscape_show_state(buf);
+
        default:
                break;
        }
@@ -3140,4 +3212,9 @@ ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *bu
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
 }
+
+ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
+}
 #endif
index 377d8837e2b128b55761adfd24a7efb89367ed82..91c69d295793d199033c9672c54f70613d6486d5 100644 (file)
@@ -607,6 +607,10 @@ ssize_t __weak cpu_show_tsa(struct device *dev, struct device_attribute *attr, c
 {
        return sysfs_emit(buf, "Not affected\n");
 }
+ssize_t __weak cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sysfs_emit(buf, "Not affected\n");
+}
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -624,6 +628,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
 static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
+static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
@@ -642,6 +647,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_reg_file_data_sampling.attr,
        &dev_attr_indirect_target_selection.attr,
        &dev_attr_tsa.attr,
+       &dev_attr_vmscape.attr,
        NULL
 };
 
index f00bbb174a2e73c75ff6003a9c0844194cd36e29..1af83e3a6f6fd12449666631d83ff3e897a2ccb7 100644 (file)
@@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
 extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
                                                  struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,