6.5-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 23 Sep 2023 08:37:17 +0000 (10:37 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 23 Sep 2023 08:37:17 +0000 (10:37 +0200)
added patches:
x86-alternatives-remove-faulty-optimization.patch
x86-static_call-fix-static-call-vs-return-thunk.patch

queue-6.5/series
queue-6.5/x86-alternatives-remove-faulty-optimization.patch [new file with mode: 0644]
queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch [new file with mode: 0644]

diff --git a/queue-6.5/series b/queue-6.5/series
index f7b56f508dea8f2895a05048d2957ba1339b1a0c..490535d9f3fa5f6c7af3e2f8efe57c582d2f7999 100644
--- a/queue-6.5/series
+++ b/queue-6.5/series
@@ -208,3 +208,5 @@ drm-amd-display-fix-2nd-dpia-encoder-assignment.patch
 revert-memcg-drop-kmem.limit_in_bytes.patch
 drm-amdgpu-fix-amdgpu_cs_p1_user_fence.patch
 interconnect-teach-lockdep-about-icc_bw_lock-order.patch
+x86-alternatives-remove-faulty-optimization.patch
+x86-static_call-fix-static-call-vs-return-thunk.patch
diff --git a/queue-6.5/x86-alternatives-remove-faulty-optimization.patch b/queue-6.5/x86-alternatives-remove-faulty-optimization.patch
new file mode 100644
index 0000000..b3c96ee
--- /dev/null
+++ b/queue-6.5/x86-alternatives-remove-faulty-optimization.patch
@@ -0,0 +1,49 @@
+From 4ba89dd6ddeca2a733bdaed7c9a5cbe4e19d9124 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+Date: Mon, 4 Sep 2023 22:04:54 -0700
+Subject: x86/alternatives: Remove faulty optimization
+
+From: Josh Poimboeuf <jpoimboe@kernel.org>
+
+commit 4ba89dd6ddeca2a733bdaed7c9a5cbe4e19d9124 upstream.
+
+The following commit
+
+  095b8303f383 ("x86/alternative: Make custom return thunk unconditional")
+
+made '__x86_return_thunk' a placeholder value.  All code setting
+X86_FEATURE_RETHUNK also changes the value of 'x86_return_thunk'.  So
+the optimization at the beginning of apply_returns() is dead code.
+
+Also, before the above-mentioned commit, the optimization actually had a
+bug: it bypassed __static_call_fixup(), causing some raw returns to
+remain unpatched in static call trampolines.  Thus the 'Fixes' tag.
+
+Fixes: d2408e043e72 ("x86/alternative: Optimize returns patching")
+Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/16d19d2249d4485d8380fb215ffaae81e6b8119e.1693889988.git.jpoimboe@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c |    8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -720,14 +720,6 @@ void __init_or_module noinline apply_ret
+ {
+      s32 *s;
+
+-      /*
+-       * Do not patch out the default return thunks if those needed are the
+-       * ones generated by the compiler.
+-       */
+-      if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
+-          (x86_return_thunk == __x86_return_thunk))
+-              return;
+-
+       for (s = start; s < end; s++) {
+               void *dest = NULL, *addr = (void *)s + *s;
+               struct insn insn;
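
To see why the removed check was both dead code and a bug, here is a minimal
userspace sketch of the control flow. This is plain C, not kernel code: the
symbol names mirror arch/x86, but enable_srso_mitigation() and the fixup
counter are simplified stand-ins invented for illustration.

/*
 * Toy model of the removed apply_returns() early exit -- NOT kernel code.
 * Compile with: cc -o deadcheck deadcheck.c
 */
#include <stdbool.h>
#include <stdio.h>

static void __x86_return_thunk(void) { }   /* placeholder since 095b8303f383 */
static void srso_return_thunk(void)  { }   /* an example custom thunk */

static void (*x86_return_thunk)(void) = __x86_return_thunk;
static bool feature_rethunk;               /* models X86_FEATURE_RETHUNK */
static int  fixups_done;                   /* stands in for __static_call_fixup() */

/* Every path that sets the feature also retargets the thunk. */
static void enable_srso_mitigation(void)   /* hypothetical enable path */
{
	feature_rethunk  = true;
	x86_return_thunk = srso_return_thunk;
}

static void apply_returns(void)
{
	/*
	 * The removed optimization: once the feature is set, the thunk
	 * has always been retargeted too, so this branch is dead -- and
	 * back when it could be taken, it skipped the fixup below,
	 * leaving raw returns in static call trampolines unpatched.
	 */
	if (feature_rethunk && x86_return_thunk == __x86_return_thunk)
		return;

	fixups_done++;                     /* patch loop + __static_call_fixup() */
}

int main(void)
{
	enable_srso_mitigation();
	apply_returns();
	printf("fixups done: %d\n", fixups_done);   /* prints 1 */
	return 0;
}

With the feature set, every enable path has already retargeted
'x86_return_thunk', so the early return can never trigger; in older kernels,
where it could trigger, it skipped __static_call_fixup() entirely.
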
diff --git a/queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch b/queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch
new file mode 100644
index 0000000..9103077
--- /dev/null
+++ b/queue-6.5/x86-static_call-fix-static-call-vs-return-thunk.patch
@@ -0,0 +1,53 @@
+From aee9d30b9744d677509ef790f30f3a24c7841c3d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 22 Sep 2023 10:12:25 +0000
+Subject: x86,static_call: Fix static-call vs return-thunk
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit aee9d30b9744d677509ef790f30f3a24c7841c3d upstream.
+
+Commit
+
+  7825451fa4dc ("static_call: Add call depth tracking support")
+
+failed to realize that the problem fixed there is not specific to call depth
+tracking but applies to all return-thunk uses.
+
+Move the fix to the appropriate place and condition.
+
+Fixes: ee88d363d156 ("x86,static_call: Use alternative RET encoding")
+Reported-by: David Kaplan <David.Kaplan@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c |    3 +++
+ arch/x86/kernel/callthunks.c  |    1 -
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -720,6 +720,9 @@ void __init_or_module noinline apply_ret
+ {
+      s32 *s;
+
++      if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++              static_call_force_reinit();
++
+       for (s = start; s < end; s++) {
+               void *dest = NULL, *addr = (void *)s + *s;
+               struct insn insn;
+--- a/arch/x86/kernel/callthunks.c
++++ b/arch/x86/kernel/callthunks.c
+@@ -272,7 +272,6 @@ void __init callthunks_patch_builtin_cal
+       pr_info("Setting up call depth tracking\n");
+       mutex_lock(&text_mutex);
+       callthunks_setup(&cs, &builtin_coretext);
+-      static_call_force_reinit();
+       thunks_initialized = true;
+       mutex_unlock(&text_mutex);
+ }
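
The ordering bug can be sketched the same way: static calls are initialized
early with a plain RET encoding, and once a return thunk is installed the
sites have to be re-initialized. Tying static_call_force_reinit() to call
depth tracking missed every other return-thunk user. Below is a hedged
userspace model in plain C with invented stand-in flags, not the kernel's
actual mechanics.

/*
 * Toy model of the static_call vs return-thunk ordering -- NOT kernel code.
 * The flags and the one-shot init below are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

static bool rethunk_enabled;       /* models X86_FEATURE_RETHUNK */
static bool call_depth_tracking;   /* models the callthunks-only path */
static bool sites_use_thunk;       /* what the static-call sites emit */
static bool force_reinit;          /* models static_call_force_reinit() */

static void static_call_init(void)
{
	static bool initialized;

	if (initialized && !force_reinit)
		return;                        /* nothing to redo */
	sites_use_thunk = rethunk_enabled;     /* re-emit the call sites */
	initialized = true;
	force_reinit = false;
}

/* Before the fix: only call depth tracking forced a re-initialization. */
static void callthunks_patch_builtin_calls(void)
{
	if (call_depth_tracking)
		force_reinit = true;
}

/* After the fix: any return-thunk user forces it from apply_returns(). */
static void apply_returns(void)
{
	if (rethunk_enabled)
		force_reinit = true;
}

int main(void)
{
	static_call_init();          /* early boot: plain RET encoding */
	rethunk_enabled = true;      /* e.g. a mitigation that needs a thunk
	                                but not call depth tracking */
	callthunks_patch_builtin_calls();   /* old path: no-op here */
	apply_returns();                    /* moved fix: forces re-init */
	static_call_init();                 /* late pass now rewrites sites */
	printf("sites use thunk: %s\n", sites_use_thunk ? "yes" : "no");
	return 0;
}

Moving the forced re-initialization into apply_returns() keys it to
X86_FEATURE_RETHUNK itself, so the late static-call pass rewrites the sites
whenever any return thunk is in use, not only when call depth tracking is.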