git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/alternative: Optimize returns patching
author: Borislav Petkov (AMD) <bp@alien8.de>
Fri, 12 May 2023 12:05:11 +0000 (14:05 +0200)
committer: Borislav Petkov (AMD) <bp@alien8.de>
Fri, 12 May 2023 15:53:18 +0000 (17:53 +0200)
Instead of decoding each instruction in the return sites range only to
realize that the return site is a jump to the default return thunk — which
is exactly what is needed when X86_FEATURE_RETHUNK is enabled and no custom
return thunk has been set — lift that check before the loop and get rid of
the per-site loop overhead.

Add comments about what gets patched, while at it.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230512120952.7924-1-bp@alien8.de
arch/x86/kernel/alternative.c

index b78d55f0dfadc5728e4057785f666cd6b2a97c10..3bb0a5f61e8c0a71e2116a041c5395f49f615333 100644 (file)
@@ -693,13 +693,12 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 {
        int i = 0;
 
+       /* Patch the custom return thunks... */
        if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
-               if (x86_return_thunk == __x86_return_thunk)
-                       return -1;
-
                i = JMP32_INSN_SIZE;
                __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
        } else {
+               /* ... or patch them out if not needed. */
                bytes[i++] = RET_INSN_OPCODE;
        }
 
@@ -712,6 +711,14 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
        s32 *s;
 
+       /*
+        * Do not patch out the default return thunks if those needed are the
+        * ones generated by the compiler.
+        */
+       if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
+           (x86_return_thunk == __x86_return_thunk))
+               return;
+
        for (s = start; s < end; s++) {
                void *dest = NULL, *addr = (void *)s + *s;
                struct insn insn;