.pushsection .noinstr.text, "ax"
-SYM_FUNC_START(entry_ibpb)
+/* Clobbers AX, CX, DX */
+SYM_FUNC_START(write_ibpb)
ANNOTATE_NOENDBR
movl $MSR_IA32_PRED_CMD, %ecx
movl $PRED_CMD_IBPB, %eax
xorl %edx, %edx
wrmsr

/* Make sure IBPB clears return stack predictions too. */
FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_BUG_IBPB_NO_RET
RET
-SYM_FUNC_END(entry_ibpb)
+SYM_FUNC_END(write_ibpb)
/* For KVM */
-EXPORT_SYMBOL_GPL(entry_ibpb);
+EXPORT_SYMBOL_GPL(write_ibpb);
.popsection
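
For reference, a minimal C sketch of the MSR write this assembly performs, assuming the kernel's native_wrmsrl() helper; the msr_write_ibpb() wrapper name is illustrative, and unlike write_ibpb() this sketch does not refill the RSB afterward.

/*
 * Illustrative C equivalent of the wrmsr in write_ibpb(): the MSR
 * index goes in ECX and the 64-bit value in EDX:EAX. write_ibpb()
 * zeroes EDX and loads PRED_CMD_IBPB into EAX before wrmsr.
 */
#include <asm/msr.h>		/* native_wrmsrl() */
#include <asm/msr-index.h>	/* MSR_IA32_PRED_CMD, PRED_CMD_IBPB */

static inline void msr_write_ibpb(void)	/* hypothetical name */
{
	native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}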
* typically has NO_MELTDOWN).
*
* While retbleed_untrain_ret() doesn't clobber anything but requires stack,
- * entry_ibpb() will clobber AX, CX, DX.
+ * write_ibpb() will clobber AX, CX, DX.
*
* As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
* where we have a stack but before any RET instruction.
VALIDATE_UNRET_END
CALL_UNTRAIN_RET
ALTERNATIVE_2 "", \
- "call entry_ibpb", \ibpb_feature, \
+ "call write_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
.endm
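
A hedged C rendering of the ALTERNATIVE_2 dispatch above may help: later alternatives take precedence when both feature bits are set, and the actual mechanism is boot-time code patching, not a runtime branch.

/*
 * Rough C pseudo-logic for the ALTERNATIVE_2 selection; comments
 * and control flow here are illustrative only.
 */
if (boot_cpu_has(X86_FEATURE_CALL_DEPTH))
	/* patch in the \call_depth_insns expansion */ ;
else if (boot_cpu_has(ibpb_feature))	/* per-site feature, e.g. entry IBPB */
	write_ibpb();			/* clobbers AX, CX, DX */
/* else: the "" alternative, i.e. no-op */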
extern void srso_alias_return_thunk(void);
extern void entry_untrain_ret(void);
-extern void entry_ibpb(void);
+extern void write_ibpb(void);
#ifdef CONFIG_X86_64
extern void clear_bhb_loop(void);
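
Because the symbol is exported for KVM, C code can call it directly; below is a minimal sketch of such a call site (the function name is hypothetical). AX, CX and DX are caller-saved under the x86-64 calling convention, so the clobbers need no special handling from a C caller.

/* Hypothetical IBPB-on-VM-exit call site using the extern above. */
static void issue_vmexit_ibpb(void)
{
	write_ibpb();	/* full barrier, also clears RSB predictions */
}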
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
/*
- * There is no need for RSB filling: entry_ibpb() ensures
+ * There is no need for RSB filling: write_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
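
A hedged sketch of the pattern shared by the three hunks above; the helper name is illustrative, not part of the patch:

/*
 * Illustrative consolidation of the repeated logic: when an IBPB-based
 * mitigation is in effect, the return thunk can be dropped because
 * write_ibpb() already invalidates all predictions, RSB included.
 */
static void __init drop_rethunk_for_ibpb(void)	/* hypothetical */
{
	setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
	/* No RSB filling needed: write_ibpb() handles it. */
}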