x86/its: Add support for ITS-safe return thunk
Author:     Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
AuthorDate: Wed, 18 Jun 2025 00:46:24 +0000 (17:46 -0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 17 Jul 2025 16:27:55 +0000 (18:27 +0200)
commit a75bf27fe41abe658c53276a0c486c4bf9adecfc upstream.

RETs in the lower half of a cacheline may be affected by the ITS bug,
specifically when the RSB underflows. Use the ITS-safe return thunk for
such RETs; the address check implementing this is sketched after the
list below.

RETs that are not patched:

- A RET in a retpoline sequence does not need to be patched, because the
  sequence itself fills an RSB entry before the RET is executed.
- RETs in the .init section are not reachable after init.
- RETs that are explicitly marked safe with ANNOTATE_UNRET_SAFE.
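
For illustration only, here is a minimal userspace C sketch of that
address check. It is not part of the patch; the two booleans are
stand-ins for kernel state (X86_FEATURE_RETHUNK and the selected
return thunk) assumed here:

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-ins for kernel state, assumed enabled for this example. */
  static bool have_rethunk = true;        /* X86_FEATURE_RETHUNK is set */
  static bool its_thunk_selected = true;  /* x86_return_thunk == its_return_thunk */

  /*
   * Mirrors cpu_wants_rethunk_at(): with the ITS return thunk selected,
   * only RETs in the lower half of a 64-byte cacheline (bit 5 of the
   * address clear, i.e. byte offset 0-31) are redirected to the thunk.
   */
  static bool wants_rethunk_at(unsigned long addr)
  {
          if (!have_rethunk)
                  return false;
          if (!its_thunk_selected)
                  return true;    /* non-ITS return thunks patch every RET */
          return !(addr & 0x20);
  }

  int main(void)
  {
          unsigned long addrs[] = { 0x1000, 0x101f, 0x1020, 0x103f };

          for (int i = 0; i < 4; i++)
                  printf("RET at 0x%lx: %s\n", addrs[i],
                         wants_rethunk_at(addrs[i]) ?
                         "patch to jmp its_return_thunk" : "leave as-is");
          return 0;
  }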

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/alternative.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/alternative.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/static_call.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/lib/retpoline.S
arch/x86/net/bpf_jit_comp.c
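
A note on the retpoline.S and vmlinux.lds.S hunks below: ".align 64,
0xcc" followed by ".skip 32, 0xcc" places its_return_thunk at byte
offset 32 of its cacheline, i.e. in the safe upper half, and the new
linker ASSERT (its_return_thunk & 0x20) enforces exactly that. A
hypothetical C illustration of the invariant, using a made-up link
address:

  #include <assert.h>
  #include <stdio.h>

  int main(void)
  {
          /* Hypothetical link address: a 64-byte-aligned base plus the
           * 32 bytes produced by ".align 64" + ".skip 32". */
          unsigned long its_return_thunk = 0xffffffff81e000c0UL + 32;

          /* Same check as the linker ASSERT: bit 5 set means byte
           * offset 32..63, the upper half of a 64-byte cacheline. */
          assert(its_return_thunk & 0x20);
          printf("cacheline offset = %lu (upper half)\n",
                 its_return_thunk & 63);
          return 0;
  }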

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 0e777b27972be34ceecafb5437302e649d36aaee..d7f33c1e052b7724db10fe23c1dfc63ab09dd2c9 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -80,6 +80,20 @@ extern void apply_returns(s32 *start, s32 *end);
 
 struct module;
 
+#ifdef CONFIG_RETHUNK
+extern bool cpu_wants_rethunk(void);
+extern bool cpu_wants_rethunk_at(void *addr);
+#else
+static __always_inline bool cpu_wants_rethunk(void)
+{
+       return false;
+}
+static __always_inline bool cpu_wants_rethunk_at(void *addr)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
                                        void *locks, void *locks_end,
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 4cc0ee529325e62d5b30f171933b47f31ff14a92..7ccaefaa16a683110e58c58c7a22926e2191965f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -226,6 +226,12 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif
 
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_return_thunk(void);
+#else
+static inline void its_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3102e7cf6a48375a2216303b0e1769532ed37270..ae4a6bc25b29c19842dd281d041fb23cb5ba5a75 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -760,6 +760,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 #ifdef CONFIG_RETHUNK
 
+bool cpu_wants_rethunk(void)
+{
+       return cpu_feature_enabled(X86_FEATURE_RETHUNK);
+}
+
+bool cpu_wants_rethunk_at(void *addr)
+{
+       if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
+               return false;
+       if (x86_return_thunk != its_return_thunk)
+               return true;
+
+       return !((unsigned long)addr & 0x20);
+}
+
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
@@ -776,7 +791,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
        int i = 0;
 
        /* Patch the custom return thunks... */
-       if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+       if (cpu_wants_rethunk_at(addr)) {
                i = JMP32_INSN_SIZE;
                __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
        } else {
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 46447877b59419984581a294b637fb99c8c781c4..fba03ad16cceb1e17a64559989660327ce09a866 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -367,7 +367,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
                goto fail;
 
        ip = trampoline + size;
-       if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+       if (cpu_wants_rethunk_at(ip))
                __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
        else
                memcpy(ip, retq, sizeof(retq));
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 4544f124bbd4d6b1bc7ada65e0122b891640fa9d..42564d29eb1bacdd1e11ccca1c0cb14b8c4e10b2 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -41,7 +41,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
                break;
 
        case RET:
-               if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+               if (cpu_wants_rethunk_at(insn))
                        code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
                else
                        code = &retinsn;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 6cee70927281f30871cb5a9680d11c5bf44b4156..1f77896515c5230f738b9637ea5c5920bd58a623 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -542,6 +542,8 @@ INIT_PER_CPU(irq_stack_backing_store);
 . = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
 . = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
 . = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
+
+. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
 #endif

 #endif /* CONFIG_X86_32 */
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 9bde4e9d8c2a36bfa2bc804efefefe10ffd5eae7..01fcf0cd679bd65f509c2ae8e334572c42f7e77b 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -281,7 +281,18 @@ SYM_CODE_START(__x86_indirect_its_thunk_array)
        .align 64, 0xcc
 SYM_CODE_END(__x86_indirect_its_thunk_array)
 
-#endif
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */
 
 SYM_CODE_START(__x86_return_thunk)
        UNWIND_HINT_FUNC
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 6225e8a8349f76bb8d62bb5613823bc083ffaf4b..c322702126407e4102cbd0a874264dd69b17b9fd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -408,7 +408,7 @@ static void emit_return(u8 **pprog, u8 *ip)
        u8 *prog = *pprog;
        int cnt = 0;
 
-       if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+       if (cpu_wants_rethunk()) {
                emit_jump(&prog, x86_return_thunk, ip);
        } else {
                EMIT1(0xC3);            /* ret */