x86/vmscape: Add conditional IBPB mitigation
author    Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
          Thu, 14 Aug 2025 17:20:42 +0000 (10:20 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 11 Sep 2025 15:19:15 +0000 (17:19 +0200)
Commit 2f8f173413f1cbf52660d04df92d0069c4306d25 upstream.

VMSCAPE is a vulnerability that exploits insufficient branch predictor
isolation between a guest and a userspace hypervisor (like QEMU). Existing
mitigations already protect kernel/KVM from a malicious guest. Userspace
can additionally be protected by flushing the branch predictors after a
VMexit.

Since it is userspace that consumes the poisoned branch predictors,
conditionally issue an IBPB after a VMexit and before returning to
userspace. Workloads that frequently switch between hypervisor and
userspace will incur the most overhead from the new IBPB.
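
In outline, the mitigation is a two-part handshake between KVM and the
generic entry code; a condensed sketch of what the hunks below implement:

	/* 1) VMexit path (arch/x86/kvm/x86.c): mark this CPU dirty. */
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		this_cpu_write(x86_ibpb_exit_to_user, true);

	/* 2) Exit-to-userspace path (entry-common.h): flush once, clear. */
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
	    this_cpu_read(x86_ibpb_exit_to_user)) {
		indirect_branch_prediction_barrier();
		this_cpu_write(x86_ibpb_exit_to_user, false);
	}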

This new IBPB is not integrated with the existing IBPB sites. For
instance, a task can use the existing speculation control prctl() to
get an IBPB at context switch time. With this implementation, the
IBPB is doubled up: one at context switch and another before running
userspace.
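
For reference, that existing per-task IBPB is requested from userspace via
the speculation control prctl(). A minimal, self-contained example (a
sketch for illustration; the constants mirror include/uapi/linux/prctl.h):

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_SPECULATION_CTRL
	#define PR_SET_SPECULATION_CTRL	53
	#endif
	#ifndef PR_SPEC_INDIRECT_BRANCH
	#define PR_SPEC_INDIRECT_BRANCH	1
	#endif
	#ifndef PR_SPEC_DISABLE
	#define PR_SPEC_DISABLE		(1UL << 2)
	#endif

	int main(void)
	{
		/* Opt in to indirect branch speculation protection; the
		 * kernel then issues an IBPB when switching to this task. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
			  PR_SPEC_DISABLE, 0, 0))
			perror("prctl");
		return 0;
	}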

The intent is to integrate and optimize these cases post-embargo.

[ dhansen: elaborate on suboptimal IBPB solution ]

Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/entry-common.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 83a5eaff33b6e846207c607e8e1f2ca6d13be3fd..f86e100cf56baeb87a56269981b63fd7899188c8 100644
 #define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
 #define X86_FEATURE_TSA_L1_NO          (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
 #define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
+#define X86_FEATURE_IBPB_EXIT_TO_USER  (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index ebdf5c97f53a8172f59366d6b3b48a590029c1e4..7dedda82f4992dfeca5b65595b6e54a646127097 100644
@@ -83,6 +83,13 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
         * 8 (ia32) bits.
         */
        choose_random_kstack_offset(rdtsc());
+
+       /* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
+       if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
+           this_cpu_read(x86_ibpb_exit_to_user)) {
+               indirect_branch_prediction_barrier();
+               this_cpu_write(x86_ibpb_exit_to_user, false);
+       }
 }
 #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
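
(For orientation: the hook above runs on every return to userspace. The
generic entry code invokes it roughly as follows; paraphrased from
kernel/entry/common.c, not part of this diff.)

	static void exit_to_user_mode_prepare(struct pt_regs *regs)
	{
		unsigned long ti_work = read_thread_flags();

		if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
			ti_work = exit_to_user_mode_loop(regs, ti_work);

		/* Last arch hook before the return; the IBPB lands here. */
		arch_exit_to_user_mode_prepare(regs, ti_work);
	}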
 
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index c77a65a3e5f14a278986db394c8fadeb29220197..818a5913f21950870c7ad6f6419d5f51104f7f07 100644
@@ -394,6 +394,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 
 extern u64 x86_pred_cmd;
 
+DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);
+
 static inline void indirect_branch_prediction_barrier(void)
 {
        alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
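
(From memory of the upstream header, not part of this diff:
alternative_msr_write() patches the wrmsr in only when the feature bit is
set, so CPUs without X86_FEATURE_USE_IBPB pay nothing.)

	static inline
	void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
	{
		asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
			: : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32)),
			    [feature] "i" (feature)
			: "memory");
	}

Writing PRED_CMD_IBPB to MSR_IA32_PRED_CMD is what actually invalidates the
indirect branch predictor contents.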
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4fbb5b15ab7516b4c6f791603cbf2c89e71e5416..32995b337ba3a4b891c2be5da9405d4bf3bd0cc5 100644
@@ -59,6 +59,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
 
+/*
+ * Set when the CPU has run a potentially malicious guest. An IBPB will
+ * be needed before running userspace. That IBPB will flush the branch
+ * predictor content.
+ */
+DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
+EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);
+
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
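
Note that this commit only defines the per-CPU flag and its consumers;
X86_FEATURE_IBPB_EXIT_TO_USER is force-set by the mitigation-selection
patch later in the series. A sketch of the shape of that logic (names
illustrative, following the usual select-mitigation pattern, not taken
from this commit):

	static void __init vmscape_select_mitigation(void)
	{
		if (!boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
		    !boot_cpu_has(X86_FEATURE_IBPB)) {
			vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
			return;
		}

		if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
			vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;

		if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
			setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
	}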
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 57ba9071841ea3433ef27a2065ec119723bccc88..11ca05d830e725a71844dbd3881fae6f4d751aad 100644
@@ -10925,6 +10925,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (vcpu->arch.guest_fpu.xfd_err)
                wrmsrl(MSR_IA32_XFD_ERR, 0);
 
+       /*
+        * Mark this CPU as needing a branch predictor flush before running
+        * userspace. Must be done before enabling preemption to ensure it gets
+        * set for the CPU that actually ran the guest, and not the CPU that it
+        * may migrate to.
+        */
+       if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
+               this_cpu_write(x86_ibpb_exit_to_user, true);
+
        /*
         * Consume any pending interrupts, including the possible source of
         * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
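
To make the ordering requirement above concrete, this is the migration race
that writing the flag before re-enabling preemption avoids (illustrative
timeline, not code from this commit):

	CPU0: VMexit from a potentially malicious guest
	      <preemption re-enabled; vCPU task migrates to CPU1>
	CPU1: this_cpu_write(x86_ibpb_exit_to_user, true)   /* wrong CPU */
	CPU0: returns to userspace with no IBPB, predictors still poisoned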