x86/bugs: Rename entry_ibpb() to write_ibpb()
author		Josh Poimboeuf <jpoimboe@kernel.org>
		Tue, 8 Apr 2025 21:47:30 +0000 (14:47 -0700)
committer	Ingo Molnar <mingo@kernel.org>
		Wed, 9 Apr 2025 10:41:29 +0000 (12:41 +0200)
There's nothing entry-specific about entry_ibpb().  In preparation for
calling it from elsewhere, rename it to write_ibpb().

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/1e54ace131e79b760de3fe828264e26d0896e3ac.1744148254.git.jpoimboe@kernel.org
arch/x86/entry/entry.S
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
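For background on the routine being renamed: an IBPB is requested by writing PRED_CMD_IBPB to the MSR_IA32_PRED_CMD MSR, which is exactly what the first hunk below sets up in assembly. A minimal C sketch of that MSR write, assuming kernel context (ibpb_msr_sketch() is a hypothetical helper, not a kernel API):

#include <asm/msr.h>		/* wrmsrl() */
#include <asm/msr-index.h>	/* MSR_IA32_PRED_CMD, PRED_CMD_IBPB */

/*
 * Hypothetical illustration: request an Indirect Branch Prediction
 * Barrier (IBPB) by writing the command bit to the prediction-command
 * MSR, as write_ibpb() does with movl + wrmsr.
 */
static inline void ibpb_msr_sketch(void)
{
	wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}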

diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index d3caa31240ede57d8f9da85e74fcd25b66882c7e..cabe65ac8379f3391be0deaaf811990b6c091584 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -17,7 +17,8 @@
 
 .pushsection .noinstr.text, "ax"
 
-SYM_FUNC_START(entry_ibpb)
+/* Clobbers AX, CX, DX */
+SYM_FUNC_START(write_ibpb)
        ANNOTATE_NOENDBR
        movl    $MSR_IA32_PRED_CMD, %ecx
        movl    $PRED_CMD_IBPB, %eax
@@ -27,9 +28,9 @@ SYM_FUNC_START(entry_ibpb)
        /* Make sure IBPB clears return stack preductions too. */
        FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
        RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(write_ibpb)
 /* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(write_ibpb);
 
 .popsection
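Note the trailing FILL_RETURN_BUFFER above: on CPUs flagged X86_BUG_IBPB_NO_RET the barrier does not invalidate return-stack predictions, so write_ibpb() overwrites the RSB by hand. A simplified, hypothetical sketch of the stuffing idea in inline asm (the real __FILL_RETURN_BUFFER macro in nospec-branch.h is alternatives-patched and more careful; the loop count here is illustrative, not RSB_CLEAR_LOOPS):

/*
 * Hypothetical illustration of RSB stuffing: each CALL pushes a benign
 * entry onto the Return Stack Buffer, and the bogus return addresses
 * are dropped from the architectural stack afterwards.
 */
static __always_inline void rsb_stuff_sketch(void)
{
	unsigned int loops = 16;	/* illustrative loop count */

	asm volatile("1: call 2f\n\t"
		     "int3\n\t"			/* trap straight-line speculation */
		     "2: dec %%ecx\n\t"
		     "jnz 1b\n\t"
		     "add $(16*8), %%rsp"	/* drop 16 bogus return addresses */
		     : "+c" (loops)
		     : : "memory");
}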
 
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 8a5cc8e70439e10aab4eeb5b0f5e116cf635b43d..591d1dbca60a4b1082ed38c6d586d7b38c26a531 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
  * typically has NO_MELTDOWN).
  *
  * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * write_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
  * where we have a stack but before any RET instruction.
        VALIDATE_UNRET_END
        CALL_UNTRAIN_RET
        ALTERNATIVE_2 "",                                               \
-                     "call entry_ibpb", \ibpb_feature,                 \
+                     "call write_ibpb", \ibpb_feature,                 \
                     __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
 #endif
 .endm
@@ -368,7 +368,7 @@ extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
 
 extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void write_ibpb(void);
 
 #ifdef CONFIG_X86_64
 extern void clear_bhb_loop(void);
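The clobber comment added in entry.S is what a C caller of the renamed routine has to honor. A hedged sketch of such a call site (ibpb_from_c() is hypothetical; the extern declaration matches the one above):

/*
 * Hypothetical illustration: invoking write_ibpb() from C. The routine
 * clobbers AX, CX and DX, so those registers belong in the clobber list.
 */
static inline void ibpb_from_c(void)
{
	asm volatile("call write_ibpb"
		     : : : "rax", "rcx", "rdx", "memory");
}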
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4386aa6c69e12c9a8d66758e9f7cfff816ccbbe3..608bbe6cf730e1906bbb280a2d27e5cbdac0f2cc 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1142,7 +1142,7 @@ do_cmd_auto:
                setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
 
                /*
-                * There is no need for RSB filling: entry_ibpb() ensures
+                * There is no need for RSB filling: write_ibpb() ensures
                 * all predictions, including the RSB, are invalidated,
                 * regardless of IBPB implementation.
                 */
@@ -2676,7 +2676,7 @@ static void __init srso_select_mitigation(void)
                                setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
 
                                /*
-                                * There is no need for RSB filling: entry_ibpb() ensures
+                                * There is no need for RSB filling: write_ibpb() ensures
                                 * all predictions, including the RSB, are invalidated,
                                 * regardless of IBPB implementation.
                                 */
@@ -2701,7 +2701,7 @@ ibpb_on_vmexit:
                                srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
 
                                /*
-                                * There is no need for RSB filling: entry_ibpb() ensures
+                                * There is no need for RSB filling: write_ibpb() ensures
                                 * all predictions, including the RSB, are invalidated,
                                 * regardless of IBPB implementation.
                                 */
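All three bugs.c hunks encode the same reasoning: once an IBPB-based mitigation is chosen, the return thunk is turned off and no separate RSB filling is configured, because write_ibpb() invalidates every predictor, RSB included. A condensed, hypothetical sketch of that selection pattern (names mirror bugs.c, but this is an illustration rather than the kernel's exact flow):

/* Hypothetical illustration of the selection pattern above. */
static void __init ibpb_mitigation_sketch(void)
{
	setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
	setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
	/*
	 * No RSB-filling setup needed: write_ibpb() invalidates all
	 * predictions, including the RSB, regardless of the IBPB
	 * implementation.
	 */
}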