From bda2adb3cbbeb20561a803bde681dc0f4015d29e Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf <jpoimboe@kernel.org>
Date: Mon, 4 Sep 2023 22:05:00 -0700
Subject: x86/srso: Disentangle rethunk-dependent options

From: Josh Poimboeuf <jpoimboe@kernel.org>

Commit 34a3cae7474c6e6f4a85aad4a7b8191b8b35cdcd upstream.

CONFIG_RETHUNK, CONFIG_CPU_UNRET_ENTRY and CONFIG_CPU_SRSO are all
tangled up. De-spaghettify the code a bit.

Some of the rethunk-related code has been shuffled around within the
'.text..__x86.return_thunk' section, but otherwise there are no
functional changes. srso_alias_untrain_ret() and srso_alias_safe_ret()
(which are very address-sensitive) haven't moved.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/2845084ed303d8384905db3b87b77693945302b4.1693889988.git.jpoimboe@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/nospec-branch.h |  25 +++--
 arch/x86/kernel/cpu/bugs.c           |   5 -
 arch/x86/kernel/vmlinux.lds.S        |   7 -
 arch/x86/lib/retpoline.S             | 158 +++++++++++++++++++----------------
 4 files changed, 109 insertions(+), 86 deletions(-)

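[ Editor's sketch, not part of the upstream commit: the hunks below replace
  the old four-way "#if defined(...)" guards around the UNTRAIN_RET* macros
  with "defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)".  The
  standalone C snippet illustrates why no coverage is lost; it assumes (as
  the "no functional changes" claim implies) that CPU_UNRET_ENTRY,
  CALL_DEPTH_TRACKING and CPU_SRSO can only be enabled together with
  RETHUNK, and the OLD_GUARD/NEW_GUARD names are made up for the example. ]

/* Every non-IBPB option from the old guard implies RETHUNK. */
#if (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CALL_DEPTH_TRACKING) || \
     defined(CONFIG_CPU_SRSO)) && !defined(CONFIG_RETHUNK)
# error "expected these options to imply CONFIG_RETHUNK"
#endif

/* The old guard, written out ... */
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
    defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
# define OLD_GUARD 1
#endif

/*
 * ... and the new one: wherever OLD_GUARD is defined, NEW_GUARD is too.
 * The only extra case (RETHUNK without any untraining option) compiles in
 * an ALTERNATIVE_3 whose default sequence is empty, hence no functional
 * change.
 */
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
# define NEW_GUARD 1
#endif

int untrain_guard_sketch;	/* keeps this a non-empty translation unit */
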
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -289,19 +289,17 @@
 * where we have a stack but before any RET instruction.
 */
 .macro UNTRAIN_RET
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
- defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
 VALIDATE_UNRET_END
 ALTERNATIVE_3 "", \
 CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
 "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
- __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
+ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
 #endif
 .endm

 .macro UNTRAIN_RET_VM
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
- defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
 VALIDATE_UNRET_END
 ALTERNATIVE_3 "", \
 CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -311,8 +309,7 @@
 .endm

 .macro UNTRAIN_RET_FROM_CALL
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
- defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
 VALIDATE_UNRET_END
 ALTERNATIVE_3 "", \
 CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
@@ -359,6 +356,20 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif

+#ifdef CONFIG_CPU_UNRET_ENTRY
+extern void retbleed_return_thunk(void);
+#else
+static inline void retbleed_return_thunk(void) {}
+#endif
+
+#ifdef CONFIG_CPU_SRSO
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+#else
+static inline void srso_return_thunk(void) {}
+static inline void srso_alias_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);

 static DEFINE_MUTEX(spec_ctrl_mutex);

-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
@@ -1108,8 +1108,7 @@ do_cmd_auto:
 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 setup_force_cpu_cap(X86_FEATURE_UNRET);

- if (IS_ENABLED(CONFIG_RETHUNK))
- x86_return_thunk = retbleed_return_thunk;
+ x86_return_thunk = retbleed_return_thunk;

 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -139,10 +139,7 @@ SECTIONS
 STATIC_CALL_TEXT

 ALIGN_ENTRY_TEXT_BEGIN
-#ifdef CONFIG_CPU_SRSO
 *(.text..__x86.rethunk_untrain)
-#endif
-
 ENTRY_TEXT

 #ifdef CONFIG_CPU_SRSO
@@ -520,12 +517,12 @@ INIT_PER_CPU(irq_stack_backing_store);
 "fixed_percpu_data is not at start of per-cpu area");
 #endif

-#ifdef CONFIG_RETHUNK
+#ifdef CONFIG_CPU_UNRET_ENTRY
 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
-. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif

 #ifdef CONFIG_CPU_SRSO
+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 /*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -126,12 +126,13 @@ SYM_CODE_END(__x86_indirect_jump_thunk_a
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
 #endif
-/*
- * This function name is magical and is used by -mfunction-return=thunk-extern
- * for the compiler to generate JMPs to it.
- */
+
 #ifdef CONFIG_RETHUNK

+ .section .text..__x86.return_thunk
+
+#ifdef CONFIG_CPU_SRSO
+
 /*
 * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
 * special addresses:
@@ -147,9 +148,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_a
 *
 * As a result, srso_alias_safe_ret() becomes a safe return.
 */
-#ifdef CONFIG_CPU_SRSO
- .section .text..__x86.rethunk_untrain
-
+ .pushsection .text..__x86.rethunk_untrain
 SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 UNWIND_HINT_FUNC
 ANNOTATE_NOENDBR
@@ -158,17 +157,9 @@ SYM_START(srso_alias_untrain_ret, SYM_L_
 jmp srso_alias_return_thunk
 SYM_FUNC_END(srso_alias_untrain_ret)
 __EXPORT_THUNK(srso_alias_untrain_ret)
+ .popsection

- .section .text..__x86.rethunk_safe
-#else
-/* dummy definition for alternatives */
-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
- ANNOTATE_UNRET_SAFE
- ret
- int3
-SYM_FUNC_END(srso_alias_untrain_ret)
-#endif
-
+ .pushsection .text..__x86.rethunk_safe
 SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
 lea 8(%_ASM_SP), %_ASM_SP
 UNWIND_HINT_FUNC
@@ -183,8 +174,58 @@ SYM_CODE_START_NOALIGN(srso_alias_return
 call srso_alias_safe_ret
 ud2
 SYM_CODE_END(srso_alias_return_thunk)
+ .popsection
+
+/*
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+ .align 64
+ .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_START(srso_untrain_ret, SYM_L_LOCAL, SYM_A_NONE)
+ ANNOTATE_NOENDBR
+ .byte 0x48, 0xb8
+
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+ lea 8(%_ASM_SP), %_ASM_SP
+ ret
+ int3
+ int3
+ /* end of movabs */
+ lfence
+ call srso_safe_ret
+ ud2
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+
+SYM_CODE_START(srso_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ call srso_safe_ret
+ ud2
+SYM_CODE_END(srso_return_thunk)
+
+#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
+#else /* !CONFIG_CPU_SRSO */
+#define JMP_SRSO_UNTRAIN_RET "ud2"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_SRSO */
+
+#ifdef CONFIG_CPU_UNRET_ENTRY

- .section .text..__x86.return_thunk
 /*
 * Some generic notes on the untraining sequences:
 *
@@ -265,65 +306,21 @@ SYM_CODE_END(retbleed_return_thunk)
 SYM_FUNC_END(retbleed_untrain_ret)
 __EXPORT_THUNK(retbleed_untrain_ret)

-/*
- * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
- * above. On kernel entry, srso_untrain_ret() is executed which is a
- *
- * movabs $0xccccc30824648d48,%rax
- *
- * and when the return thunk executes the inner label srso_safe_ret()
- * later, it is a stack manipulation and a RET which is mispredicted and
- * thus a "safe" one to use.
- */
- .align 64
- .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
-SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
- ANNOTATE_NOENDBR
- .byte 0x48, 0xb8
+#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
+#else /* !CONFIG_CPU_UNRET_ENTRY */
+#define JMP_RETBLEED_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_UNRET_ENTRY */

-/*
- * This forces the function return instruction to speculate into a trap
- * (UD2 in srso_return_thunk() below). This RET will then mispredict
- * and execution will continue at the return site read from the top of
- * the stack.
- */
-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
- lea 8(%_ASM_SP), %_ASM_SP
- ret
- int3
- int3
- /* end of movabs */
- lfence
- call srso_safe_ret
- ud2
-SYM_CODE_END(srso_safe_ret)
-SYM_FUNC_END(srso_untrain_ret)
-__EXPORT_THUNK(srso_untrain_ret)
-
-SYM_CODE_START(srso_return_thunk)
- UNWIND_HINT_FUNC
- ANNOTATE_NOENDBR
- call srso_safe_ret
- ud2
-SYM_CODE_END(srso_return_thunk)
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)

 SYM_FUNC_START(entry_untrain_ret)
- ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
- "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
- "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
+ JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
+ JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)

-SYM_CODE_START(__x86_return_thunk)
- UNWIND_HINT_FUNC
- ANNOTATE_NOENDBR
- ANNOTATE_UNRET_SAFE
- ret
- int3
-SYM_CODE_END(__x86_return_thunk)
-EXPORT_SYMBOL(__x86_return_thunk)
-
-#endif /* CONFIG_RETHUNK */
+#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */

 #ifdef CONFIG_CALL_DEPTH_TRACKING

@@ -358,3 +355,22 @@ SYM_FUNC_START(__x86_return_skl)
 SYM_FUNC_END(__x86_return_skl)

 #endif /* CONFIG_CALL_DEPTH_TRACKING */
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ *
+ * This code is only used during kernel boot or module init. All
+ * 'JMP __x86_return_thunk' sites are changed to something else by
+ * apply_returns().
+ */
+SYM_CODE_START(__x86_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+SYM_CODE_END(__x86_return_thunk)
+EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
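
[ Editor's sketch, not part of the upstream commit: the SRSO comment moved
  above relies on the movabs immediate 0xccccc30824648d48 decoding to the
  srso_safe_ret() instructions when execution falls in at the inner label.
  The standalone program below only prints the immediate's in-memory byte
  order so it can be compared with that encoding; it assumes a
  little-endian host and is illustrative rather than part of the kernel
  build. ]

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Immediate of "movabs $0xccccc30824648d48,%rax" (opcode bytes 48 b8). */
	uint64_t imm = 0xccccc30824648d48ULL;
	unsigned char b[sizeof(imm)];

	memcpy(b, &imm, sizeof(b));	/* in-memory order on a little-endian host */

	/*
	 * Expected output: 48 8d 64 24 08 c3 cc cc
	 * which, decoded as code starting at srso_safe_ret:
	 *   48 8d 64 24 08   lea 0x8(%rsp),%rsp
	 *   c3               ret
	 *   cc cc            int3; int3
	 */
	for (size_t i = 0; i < sizeof(b); i++)
		printf("%02x ", b[i]);
	printf("\n");
	return 0;
}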