From 05c85bb35bc32976176aa34e2ae633cfd8cda965 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 21 Aug 2023 17:37:49 +0200
Subject: [PATCH] 6.1-stable patches

added patches:
      x86-alternative-make-custom-return-thunk-unconditional.patch
      x86-cpu-fix-__x86_return_thunk-symbol-type.patch
      x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
---
 queue-6.1/series                              |  3 ++
 ...ke-custom-return-thunk-unconditional.patch | 51 +++++++++++++++++++
 ...u-fix-__x86_return_thunk-symbol-type.patch | 41 +++++++++++++++
 ...srso_safe_ret-and-__x86_return_thunk.patch | 48 +++++++++++++++++
 4 files changed, 143 insertions(+)
 create mode 100644 queue-6.1/x86-alternative-make-custom-return-thunk-unconditional.patch
 create mode 100644 queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch
 create mode 100644 queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch

diff --git a/queue-6.1/series b/queue-6.1/series
index 62af34650a4..b18d210f207 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -170,3 +170,6 @@ sched-fair-remove-capacity-inversion-detection.patch
 drm-amd-display-implement-workaround-for-writing-to-otg_pixel_rate_div-register.patch
 hugetlb-do-not-clear-hugetlb-dtor-until-allocating-vmemmap.patch
 netfilter-set-default-timeout-to-3-secs-for-sctp-shutdown-send-and-recv-state.patch
+x86-cpu-fix-__x86_return_thunk-symbol-type.patch
+x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
+x86-alternative-make-custom-return-thunk-unconditional.patch
diff --git a/queue-6.1/x86-alternative-make-custom-return-thunk-unconditional.patch b/queue-6.1/x86-alternative-make-custom-return-thunk-unconditional.patch
new file mode 100644
index 00000000000..0b4c6183b9d
--- /dev/null
+++ b/queue-6.1/x86-alternative-make-custom-return-thunk-unconditional.patch
@@ -0,0 +1,51 @@
+From 095b8303f3835c68ac4a8b6d754ca1c3b6230711 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Mon, 14 Aug 2023 13:44:30 +0200
+Subject: x86/alternative: Make custom return thunk unconditional
+
+From: Peter Zijlstra
+
+commit 095b8303f3835c68ac4a8b6d754ca1c3b6230711 upstream.
+
+There is infrastructure to rewrite return thunks to point to any
+random thunk one desires, unwrap that from CALL_THUNKS, which up to
+now was the sole user of that.
+
+ [ bp: Make the thunks visible on 32-bit and add ifdeffery for the
+ 32-bit builds. ]
+
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20230814121148.775293785@infradead.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/nospec-branch.h | 5 +++++
+ arch/x86/kernel/cpu/bugs.c           | 2 ++
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -210,7 +210,12 @@
+ typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+
++#ifdef CONFIG_RETHUNK
+ extern void __x86_return_thunk(void);
++#else
++static inline void __x86_return_thunk(void) {}
++#endif
++
+ extern void zen_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_untrain_ret_alias(void);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
++void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
diff --git a/queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch b/queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch
new file mode 100644
index 00000000000..aeb1e50c266
--- /dev/null
+++ b/queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch
@@ -0,0 +1,41 @@
+From 77f67119004296a9b2503b377d610e08b08afc2a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Mon, 14 Aug 2023 13:44:27 +0200
+Subject: x86/cpu: Fix __x86_return_thunk symbol type
+
+From: Peter Zijlstra
+
+commit 77f67119004296a9b2503b377d610e08b08afc2a upstream.
+
+Commit
+
+ fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+
+reimplemented __x86_return_thunk with a mix of SYM_FUNC_START and
+SYM_CODE_END, this is not a sane combination.
+
+Since nothing should ever actually 'CALL' this, make it consistently
+CODE.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20230814121148.571027074@infradead.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/lib/retpoline.S | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -207,7 +207,9 @@ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+
+-SYM_FUNC_START(__x86_return_thunk)
++SYM_CODE_START(__x86_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
+ ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+ int3
diff --git a/queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch b/queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
new file mode 100644
index 00000000000..13e155a44e2
--- /dev/null
+++ b/queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
@@ -0,0 +1,48 @@
+From af023ef335f13c8b579298fc432daeef609a9e60 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Mon, 14 Aug 2023 13:44:28 +0200
+Subject: x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
+
+From: Peter Zijlstra
+
+commit af023ef335f13c8b579298fc432daeef609a9e60 upstream.
+
+ vmlinux.o: warning: objtool: srso_untrain_ret() falls through to next function __x86_return_skl()
+ vmlinux.o: warning: objtool: __x86_return_thunk() falls through to next function __x86_return_skl()
+
+This is because these functions (can) end with CALL, which objtool
+does not consider a terminating instruction. Therefore, replace the
+INT3 instruction (which is a non-fatal trap) with UD2 (which is a
+fatal-trap).
+
+This indicates execution will not continue past this point.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel)
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/r/20230814121148.637802730@infradead.org
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/lib/retpoline.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -202,7 +202,7 @@ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLO
+ int3
+ lfence
+ call srso_safe_ret
+- int3
++ ud2
+ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+
+@@ -212,7 +212,7 @@ SYM_CODE_START(__x86_return_thunk)
+ ANNOTATE_NOENDBR
+ ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+- int3
++ ud2
+ SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
-- 
2.47.3