From 1740598cfd71d52425c662343a1b479199760d8c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 23 Sep 2023 10:37:17 +0200 Subject: [PATCH] 6.5-stable patches added patches: x86-alternatives-remove-faulty-optimization.patch x86-static_call-fix-static-call-vs-return-thunk.patch --- queue-6.5/series | 2 + ...ernatives-remove-faulty-optimization.patch | 49 +++++++++++++++++ ...call-fix-static-call-vs-return-thunk.patch | 53 +++++++++++++++++++ 3 files changed, 104 insertions(+) create mode 100644 queue-6.5/x86-alternatives-remove-faulty-optimization.patch create mode 100644 queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch diff --git a/queue-6.5/series b/queue-6.5/series index f7b56f508de..490535d9f3f 100644 --- a/queue-6.5/series +++ b/queue-6.5/series @@ -208,3 +208,5 @@ drm-amd-display-fix-2nd-dpia-encoder-assignment.patch revert-memcg-drop-kmem.limit_in_bytes.patch drm-amdgpu-fix-amdgpu_cs_p1_user_fence.patch interconnect-teach-lockdep-about-icc_bw_lock-order.patch +x86-alternatives-remove-faulty-optimization.patch +x86-static_call-fix-static-call-vs-return-thunk.patch diff --git a/queue-6.5/x86-alternatives-remove-faulty-optimization.patch b/queue-6.5/x86-alternatives-remove-faulty-optimization.patch new file mode 100644 index 00000000000..b3c96ee5570 --- /dev/null +++ b/queue-6.5/x86-alternatives-remove-faulty-optimization.patch @@ -0,0 +1,49 @@ +From 4ba89dd6ddeca2a733bdaed7c9a5cbe4e19d9124 Mon Sep 17 00:00:00 2001 +From: Josh Poimboeuf +Date: Mon, 4 Sep 2023 22:04:54 -0700 +Subject: x86/alternatives: Remove faulty optimization + +From: Josh Poimboeuf + +commit 4ba89dd6ddeca2a733bdaed7c9a5cbe4e19d9124 upstream. + +The following commit + + 095b8303f383 ("x86/alternative: Make custom return thunk unconditional") + +made '__x86_return_thunk' a placeholder value. All code setting +X86_FEATURE_RETHUNK also changes the value of 'x86_return_thunk'. So +the optimization at the beginning of apply_returns() is dead code. 
+
+Also, before the above-mentioned commit, the optimization actually had a
+bug. It bypassed __static_call_fixup(), causing some raw returns to
+remain unpatched in static call trampolines. Thus the 'Fixes' tag.
+
+Fixes: d2408e043e72 ("x86/alternative: Optimize returns patching")
+Signed-off-by: Josh Poimboeuf
+Signed-off-by: Ingo Molnar
+Signed-off-by: Borislav Petkov (AMD)
+Acked-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/16d19d2249d4485d8380fb215ffaae81e6b8119e.1693889988.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/alternative.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -720,14 +720,6 @@ void __init_or_module noinline apply_ret
+ {
+ 	s32 *s;
+
+-	/*
+-	 * Do not patch out the default return thunks if those needed are the
+-	 * ones generated by the compiler.
+-	 */
+-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
+-	    (x86_return_thunk == __x86_return_thunk))
+-		return;
+-
+ 	for (s = start; s < end; s++) {
+ 		void *dest = NULL, *addr = (void *)s + *s;
+ 		struct insn insn;
diff --git a/queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch b/queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch
new file mode 100644
index 00000000000..9103077da2c
--- /dev/null
+++ b/queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch
@@ -0,0 +1,53 @@
+From aee9d30b9744d677509ef790f30f3a24c7841c3d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 22 Sep 2023 10:12:25 +0000
+Subject: x86,static_call: Fix static-call vs return-thunk
+
+From: Peter Zijlstra
+
+commit aee9d30b9744d677509ef790f30f3a24c7841c3d upstream.
+
+Commit
+
+ 7825451fa4dc ("static_call: Add call depth tracking support")
+
+failed to realize the problem fixed there is not specific to call depth
+tracking but applies to all return-thunk uses.
+
+Move the fix to the appropriate place and condition.
+ +Fixes: ee88d363d156 ("x86,static_call: Use alternative RET encoding") +Reported-by: David Kaplan +Signed-off-by: Peter Zijlstra (Intel) +Signed-off-by: Borislav Petkov (AMD) +Reviewed-by: Ingo Molnar +Tested-by: Borislav Petkov (AMD) +Cc: +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/alternative.c | 3 +++ + arch/x86/kernel/callthunks.c | 1 - + 2 files changed, 3 insertions(+), 1 deletion(-) + +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -720,6 +720,9 @@ void __init_or_module noinline apply_ret + { + s32 *s; + ++ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) ++ static_call_force_reinit(); ++ + for (s = start; s < end; s++) { + void *dest = NULL, *addr = (void *)s + *s; + struct insn insn; +--- a/arch/x86/kernel/callthunks.c ++++ b/arch/x86/kernel/callthunks.c +@@ -272,7 +272,6 @@ void __init callthunks_patch_builtin_cal + pr_info("Setting up call depth tracking\n"); + mutex_lock(&text_mutex); + callthunks_setup(&cs, &builtin_coretext); +- static_call_force_reinit(); + thunks_initialized = true; + mutex_unlock(&text_mutex); + } -- 2.47.3