git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/vmscape: Enumerate VMSCAPE bug
author: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Thu, 14 Aug 2025 17:20:42 +0000 (10:20 -0700)
committer: Dave Hansen <dave.hansen@linux.intel.com>
Thu, 14 Aug 2025 17:26:20 +0000 (10:26 -0700)
The VMSCAPE vulnerability may allow a guest to cause Branch Target
Injection (BTI) in userspace hypervisors.

Kernels (both host and guest) have existing defenses against direct BTI
attacks from guests. There are also inter-process BTI mitigations which
prevent processes from attacking each other. However, the threat in this
case is to a userspace hypervisor within the same process as the attacker.

Userspace hypervisors have access to their own sensitive data like disk
encryption keys and also typically have access to all guest data. This
means guest userspace may use the hypervisor as a confused deputy to attack
sensitive guest kernel data. There are no existing mitigations for these
attacks.

Introduce X86_BUG_VMSCAPE for this vulnerability and set it on affected
Intel and AMD CPUs.

Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/common.c

index 602957dd2609ce4f533bf78e431662138b393aec..b6fa5c33c85d851e7f11cc3e7cb11e4c6e0c2dd1 100644 (file)
 #define X86_BUG_ITS                    X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
 #define X86_BUG_ITS_NATIVE_ONLY                X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
 #define X86_BUG_TSA                    X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
+#define X86_BUG_VMSCAPE                        X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
 #endif /* _ASM_X86_CPUFEATURES_H */
index 34a054181c4dc4bcb998e2395c9c7aefeb778e54..2b87c93e660963d5306fc74e57e58d0fbc2abea3 100644 (file)
@@ -1236,6 +1236,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define ITS_NATIVE_ONLY        BIT(9)
 /* CPU is affected by Transient Scheduler Attacks */
 #define TSA            BIT(10)
+/* CPU is affected by VMSCAPE */
+#define VMSCAPE                BIT(11)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,          X86_STEP_MAX,      SRBDS),
@@ -1247,44 +1249,55 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,        X86_STEP_MAX,      SRBDS),
        VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,        X86_STEP_MAX,      MMIO),
        VULNBL_INTEL_STEPS(INTEL_BROADWELL,          X86_STEP_MAX,      SRBDS),
-       VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,                   0x5,      MMIO | RETBLEED | GDS),
-       VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,          X86_STEP_MAX,      MMIO | RETBLEED | GDS | ITS),
-       VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,          X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS),
-       VULNBL_INTEL_STEPS(INTEL_SKYLAKE,            X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS),
-       VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,                  0xb,      MMIO | RETBLEED | GDS | SRBDS),
-       VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,         X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS | ITS),
-       VULNBL_INTEL_STEPS(INTEL_KABYLAKE,                    0xc,      MMIO | RETBLEED | GDS | SRBDS),
-       VULNBL_INTEL_STEPS(INTEL_KABYLAKE,           X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS | ITS),
-       VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,       X86_STEP_MAX,      RETBLEED),
+       VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,                   0x5,      MMIO | RETBLEED | GDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,          X86_STEP_MAX,      MMIO | RETBLEED | GDS | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,          X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_SKYLAKE,            X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,                  0xb,      MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,         X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_KABYLAKE,                    0xc,      MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_KABYLAKE,           X86_STEP_MAX,      MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,       X86_STEP_MAX,      RETBLEED | VMSCAPE),
        VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,          X86_STEP_MAX,      MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
        VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,          X86_STEP_MAX,      MMIO | GDS | ITS | ITS_NATIVE_ONLY),
        VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,          X86_STEP_MAX,      MMIO | GDS | ITS | ITS_NATIVE_ONLY),
-       VULNBL_INTEL_STEPS(INTEL_COMETLAKE,          X86_STEP_MAX,      MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
-       VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,                 0x0,      MMIO | RETBLEED | ITS),
-       VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,        X86_STEP_MAX,      MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+       VULNBL_INTEL_STEPS(INTEL_COMETLAKE,          X86_STEP_MAX,      MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,                 0x0,      MMIO | RETBLEED | ITS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,        X86_STEP_MAX,      MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
        VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,        X86_STEP_MAX,      GDS | ITS | ITS_NATIVE_ONLY),
        VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,          X86_STEP_MAX,      GDS | ITS | ITS_NATIVE_ONLY),
        VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,          X86_STEP_MAX,      MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,         X86_STEP_MAX,      MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
-       VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,                   ATOM,      RFDS),
-       VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,        X86_STEP_MAX,      RFDS),
-       VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,                  ATOM,      RFDS),
-       VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,       X86_STEP_MAX,      RFDS),
-       VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,       X86_STEP_MAX,      RFDS),
-       VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT,     X86_STEP_MAX,      RFDS),
+       VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,                   ATOM,      RFDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_ALDERLAKE,          X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,        X86_STEP_MAX,      RFDS | VMSCAPE),
+       VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,                  ATOM,      RFDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE,         X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,       X86_STEP_MAX,      RFDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,       X86_STEP_MAX,      RFDS | VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L,       X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H,        X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_ARROWLAKE,          X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U,        X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M,        X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X,   X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X,    X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X,    X86_STEP_MAX,      VMSCAPE),
+       VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT,     X86_STEP_MAX,      RFDS | VMSCAPE),
        VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT,       X86_STEP_MAX,      MMIO | MMIO_SBDS | RFDS),
        VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D,     X86_STEP_MAX,      MMIO | RFDS),
        VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L,     X86_STEP_MAX,      MMIO | MMIO_SBDS | RFDS),
        VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT,      X86_STEP_MAX,      RFDS),
        VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D,    X86_STEP_MAX,      RFDS),
        VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX,      RFDS),
+       VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X,   X86_STEP_MAX,      VMSCAPE),
 
        VULNBL_AMD(0x15, RETBLEED),
        VULNBL_AMD(0x16, RETBLEED),
-       VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
-       VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
-       VULNBL_AMD(0x19, SRSO | TSA),
-       VULNBL_AMD(0x1a, SRSO),
+       VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
+       VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
+       VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
+       VULNBL_AMD(0x1a, SRSO | VMSCAPE),
        {}
 };
 
@@ -1543,6 +1556,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                }
        }
 
+       /*
+        * Set the bug only on bare-metal. A nested hypervisor should already be
+        * deploying IBPB to isolate itself from nested guests.
+        */
+       if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
+           !boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               setup_force_cpu_bug(X86_BUG_VMSCAPE);
+
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;