x86/bugs: Move the X86_FEATURE_USE_IBPB check into callers
author Yosry Ahmed <yosry.ahmed@linux.dev>
Thu, 27 Feb 2025 01:27:07 +0000 (01:27 +0000)
committer Ingo Molnar <mingo@kernel.org>
Thu, 27 Feb 2025 09:57:20 +0000 (10:57 +0100)
indirect_branch_prediction_barrier() only performs the MSR write if
X86_FEATURE_USE_IBPB is set, using alternative_msr_write(). In
preparation for removing X86_FEATURE_USE_IBPB, move the feature check
into the callers so that they can be addressed one-by-one, and use
X86_FEATURE_IBPB instead to guard the MSR write.
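
For illustration, a minimal user-space sketch of the split this patch makes
(everything suffixed _sim is a hypothetical stand-in, not a kernel API): the
barrier helper now tests only hardware support (X86_FEATURE_IBPB), while each
caller tests the kernel's policy decision (X86_FEATURE_USE_IBPB) explicitly.
In the real kernel the helper's check is patched in at boot via
alternative_msr_write() rather than branched on at runtime.

    /* ibpb_gate_sketch.c -- build: cc -o ibpb_gate_sketch ibpb_gate_sketch.c */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { X86_FEATURE_IBPB_SIM, X86_FEATURE_USE_IBPB_SIM, NFEATURES_SIM };
    static bool features_sim[NFEATURES_SIM];

    static void wrmsr_sim(uint32_t msr, uint64_t val)
    {
            printf("WRMSR 0x%x <- 0x%llx (IBPB issued)\n",
                   msr, (unsigned long long)val);
    }

    /* After the patch: the barrier checks hardware support only
     * (previously it checked X86_FEATURE_USE_IBPB here). */
    static void indirect_branch_prediction_barrier_sim(void)
    {
            if (features_sim[X86_FEATURE_IBPB_SIM])
                    wrmsr_sim(0x49 /* MSR_IA32_PRED_CMD */, 0x1 /* PRED_CMD_IBPB */);
    }

    /* ...and each call site now carries the policy check itself: */
    static void caller_sim(void)
    {
            if (features_sim[X86_FEATURE_USE_IBPB_SIM])
                    indirect_branch_prediction_barrier_sim();
    }

    int main(void)
    {
            features_sim[X86_FEATURE_IBPB_SIM] = true;
            features_sim[X86_FEATURE_USE_IBPB_SIM] = true;
            caller_sim();
            return 0;
    }

Making the call sites independently auditable is the point: once every caller
checks X86_FEATURE_USE_IBPB itself, the synthetic feature bit can be retired
one call site at a time.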

Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Acked-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20250227012712.3193063-2-yosry.ahmed@linux.dev
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/mm/tlb.c

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7e8bf78c03d5d045889aa1449aa3e9a38f4742d7..7cbb76a2434b9f5b9cbee2e351258ec31579ef7f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -515,7 +515,7 @@ extern u64 x86_pred_cmd;
 
 static inline void indirect_branch_prediction_barrier(void)
 {
-       alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
+       alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_IBPB);
 }
 
 /* The Intel SPEC CTRL MSR base value cache */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 1d7afc40f2272bcf2838f466dcc6254d522eab1a..754150fc05784e539c0592dfe2f48d4a3ed3f4c2 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2272,7 +2272,7 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
-               if (task == current)
+               if (task == current && cpu_feature_enabled(X86_FEATURE_USE_IBPB))
                        indirect_branch_prediction_barrier();
                break;
        default:
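
For context, the ib_prctl_set() path above is reached from user space through
the speculation-control prctl. A minimal caller (this is the real API from
<linux/prctl.h>; note the kernel only honors it when the spectre_v2_user
mitigation is in prctl or seccomp mode, otherwise the call fails):

    /* prctl_ibpb_demo.c -- build: cc -o prctl_ibpb_demo prctl_ibpb_demo.c */
    #include <stdio.h>
    #include <sys/prctl.h>     /* pulls in the PR_SPEC_* constants */

    int main(void)
    {
            /* Opt this task out of indirect branch speculation; with the
             * patch applied, the immediate IBPB for the current task is
             * issued only if X86_FEATURE_USE_IBPB is set. */
            if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                      PR_SPEC_DISABLE, 0, 0))
                    perror("PR_SET_SPECULATION_CTRL");
            else
                    puts("indirect branch speculation disabled for this task");
            return 0;
    }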
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 77ab66c5bb962aeb0c77ce5b3b27941dbb0dc649..57222c3b56592bd03d5c46a83685539ec5512efd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1565,7 +1565,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (sd->current_vmcb != svm->vmcb) {
                sd->current_vmcb = svm->vmcb;
 
-               if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT))
+               if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT) &&
+                   cpu_feature_enabled(X86_FEATURE_USE_IBPB))
                        indirect_branch_prediction_barrier();
        }
        if (kvm_vcpu_apicv_active(vcpu))
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8a7af02d466e9ff738c255533c09e1d34cb7c452..1df427a822dd7fb78298a7ec191886323dbd3368 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5026,7 +5026,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
         * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
         * separate modes for L2 vs L1.
         */
-       if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL))
+       if (guest_cpu_cap_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+           cpu_feature_enabled(X86_FEATURE_USE_IBPB))
                indirect_branch_prediction_barrier();
 
        /* Update any VMCS fields that might have changed while L2 ran */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6c56d5235f0f3e18c9476f433f7b783ba600ea3c..042b7a88157b071b95c363f6ad4e336133a8a7b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1477,7 +1477,8 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
                 * performs IBPB on nested VM-Exit (a single nested transition
                 * may switch the active VMCS multiple times).
                 */
-               if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+               if (cpu_feature_enabled(X86_FEATURE_USE_IBPB) &&
+                   (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)))
                        indirect_branch_prediction_barrier();
        }
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 6cf881a942bbedc672889f2c96344980f1fd4e46..4f61d11de041cc2a4df18f69724a3f7e3d1725b1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -437,7 +437,8 @@ static void cond_mitigation(struct task_struct *next)
                 * both have the IBPB bit set.
                 */
                if (next_mm != prev_mm &&
-                   (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+                   (next_mm | prev_mm) & LAST_USER_MM_IBPB &&
+                   cpu_feature_enabled(X86_FEATURE_USE_IBPB))
                        indirect_branch_prediction_barrier();
        }
 
@@ -447,8 +448,8 @@ static void cond_mitigation(struct task_struct *next)
                 * different context than the user space task which ran
                 * last on this CPU.
                 */
-               if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
-                                       (unsigned long)next->mm)
+               if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) != (unsigned long)next->mm &&
+                   cpu_feature_enabled(X86_FEATURE_USE_IBPB))
                        indirect_branch_prediction_barrier();
        }
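
The two tlb.c hunks rely on the last-user-mm bookkeeping in cond_mitigation():
the previous task's mm pointer is cached with its low bits stolen to record
per-task speculation flags, so one compare-and-mask decides whether an IBPB is
due on the context switch. A user-space model of that encoding (constants and
names are illustrative stand-ins patterned on tlb.c, not its exact code):

    /* last_mm_tag_sketch.c -- build: cc -o last_mm_tag_sketch last_mm_tag_sketch.c */
    #include <stdbool.h>
    #include <stdio.h>

    #define LAST_USER_MM_IBPB_SIM      0x1UL   /* task asked for IBPB */
    #define LAST_USER_MM_SPEC_MASK_SIM 0x3UL   /* all stolen flag bits */

    struct mm_sim { long dummy; };  /* aligned, so the low bits are free */

    /* Tag an mm pointer with the owning task's speculation flags. */
    static unsigned long mangle_mm_sim(struct mm_sim *mm, bool wants_ibpb)
    {
            return (unsigned long)mm | (wants_ibpb ? LAST_USER_MM_IBPB_SIM : 0);
    }

    /* The first hunk's condition: barrier only when the mm really changes
     * and either the outgoing or incoming side set the IBPB bit. */
    static bool ibpb_needed_sim(unsigned long prev_mm, unsigned long next_mm)
    {
            return next_mm != prev_mm &&
                   ((next_mm | prev_mm) & LAST_USER_MM_IBPB_SIM);
    }

    int main(void)
    {
            struct mm_sim a, b;
            unsigned long prev = mangle_mm_sim(&a, true);
            unsigned long next = mangle_mm_sim(&b, false);
            printf("IBPB on switch: %d\n", ibpb_needed_sim(prev, next)); /* 1 */
            return 0;
    }

With the patch, both of these barrier sites additionally require
X86_FEATURE_USE_IBPB, making the policy gate explicit at each call.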