From: Peter Zijlstra
Date: Mon, 14 Aug 2023 11:44:30 +0000 (+0200)
Subject: x86/alternative: Make custom return thunk unconditional
X-Git-Tag: v5.10.211~34
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=1dfe6393d17936e285f58d4e35aa14affd153a3b;p=thirdparty%2Fkernel%2Fstable.git

x86/alternative: Make custom return thunk unconditional

Upstream commit: 095b8303f3835c68ac4a8b6d754ca1c3b6230711

There is infrastructure to rewrite return thunks to point to any random
thunk one desires, unwrap that from CALL_THUNKS, which up to now was the
sole user of that.

  [ bp: Make the thunks visible on 32-bit and add ifdeffery for the
    32-bit builds. ]

Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20230814121148.775293785@infradead.org
Signed-off-by: Borislav Petkov (AMD)
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 319b4249ea01e..7711ba5342a1a 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -190,7 +190,11 @@
 	_ASM_PTR " 999b\n\t"					\
 	".popsection\n\t"
 
+#ifdef CONFIG_RETHUNK
 extern void __x86_return_thunk(void);
+#else
+static inline void __x86_return_thunk(void) {}
+#endif
 
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
@@ -203,11 +207,7 @@ extern void srso_alias_untrain_ret(void);
 extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
-#ifdef CONFIG_CALL_THUNKS
 extern void (*x86_return_thunk)(void);
-#else
-#define x86_return_thunk	(&__x86_return_thunk)
-#endif
 
 #ifdef CONFIG_RETPOLINE
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 4d3715f6aaab3..9ceef8515c031 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -677,10 +677,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 #ifdef CONFIG_RETHUNK
 
-#ifdef CONFIG_CALL_THUNKS
-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
-#endif
-
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index fc20f24b422a9..d9fda0b6eb19e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -61,6 +61,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
+void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
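
For illustration only, not part of the patch: with x86_return_thunk now
defined unconditionally in bugs.c and initialized to &__x86_return_thunk,
mitigation selection code can simply repoint it before the return sites are
patched. Below is a minimal sketch of that usage pattern, assuming the usual
bugs.c context (boot_cpu_has_bug(), the thunk declarations from
nospec-branch.h); select_return_thunk() is a hypothetical helper name, not
something this patch or the kernel adds.

#include <linux/init.h>
#include <asm/cpufeature.h>	/* boot_cpu_has_bug(), X86_BUG_* bits */
#include <asm/nospec-branch.h>	/* x86_return_thunk, *_return_thunk() decls */

/* Hypothetical sketch: pick a return thunk during mitigation selection. */
static void __init select_return_thunk(void)
{
	/*
	 * x86_return_thunk defaults to &__x86_return_thunk (see the bugs.c
	 * hunk above). Overriding the pointer here is enough; apply_returns()
	 * later rewrites each compiler-generated "jmp __x86_return_thunk"
	 * into a jump to whatever x86_return_thunk points at by then.
	 */
	if (boot_cpu_has_bug(X86_BUG_SRSO))
		x86_return_thunk = srso_return_thunk;
	else if (boot_cpu_has_bug(X86_BUG_RETBLEED))
		x86_return_thunk = retbleed_return_thunk;
	/* otherwise keep the default __x86_return_thunk */
}

The point of the change is visible in the sketch: the pointer assignment no
longer depends on CONFIG_CALL_THUNKS, so any mitigation (not just call depth
tracking) can install a custom return thunk.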