From: Greg Kroah-Hartman
Date: Fri, 5 Apr 2024 10:23:37 +0000 (+0200)
Subject: 6.1-stable patches
X-Git-Tag: v5.15.154~87
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=86e300ef490ac7d6c60636cb67449095fd9b6c39;p=thirdparty%2Fkernel%2Fstable-queue.git

6.1-stable patches

added patches:
	x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
	x86-retpoline-do-the-necessary-fixup-to-the-zen3-4-srso-return-thunk-for-srso.patch
---

diff --git a/queue-6.1/series b/queue-6.1/series
index ce41a27241f..d3ab46796fc 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -50,3 +50,5 @@ net-phy-micrel-lan8814-fix-when-enabling-disabling-1-step-timestamping.patch
 net-phy-micrel-fix-potential-null-pointer-dereference.patch
 selftests-net-gro-fwd-update-vxlan-gro-test-expectations.patch
 gro-fix-ownership-transfer.patch
+x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
+x86-retpoline-do-the-necessary-fixup-to-the-zen3-4-srso-return-thunk-for-srso.patch
diff --git a/queue-6.1/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch b/queue-6.1/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
new file mode 100644
index 00000000000..8921bccc0ae
--- /dev/null
+++ b/queue-6.1/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
@@ -0,0 +1,94 @@
+From 4535e1a4174c4111d92c5a9a21e542d232e0fcaa Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Thu, 28 Mar 2024 13:59:05 +0100
+Subject: x86/bugs: Fix the SRSO mitigation on Zen3/4
+
+From: Borislav Petkov (AMD)
+
+commit 4535e1a4174c4111d92c5a9a21e542d232e0fcaa upstream.
+
+The original version of the mitigation would patch in the calls to the
+untraining routines directly. That is, the alternative() in UNTRAIN_RET
+would patch in the CALL to srso_alias_untrain_ret() directly.
+
+However, even if commit e7c25c441e9e ("x86/cpu: Cleanup the untrain
+mess") meant well in trying to clean up the situation, due to micro-
+architectural reasons, the untraining routine srso_alias_untrain_ret()
+must be the target of a CALL instruction and not of a JMP instruction,
+as is done now.
+
+Reshuffle the alternative macros to accomplish that.
+
+Fixes: e7c25c441e9e ("x86/cpu: Cleanup the untrain mess")
+Signed-off-by: Borislav Petkov (AMD)
+Reviewed-by: Ingo Molnar
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/asm-prototypes.h |    1 +
+ arch/x86/include/asm/nospec-branch.h  |   20 ++++++++++++++------
+ arch/x86/lib/retpoline.S              |    4 +---
+ 3 files changed, 16 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -12,6 +12,7 @@
+ #include <asm/special_insns.h>
+ #include <asm/preempt.h>
+ #include <asm/asm.h>
++#include <asm/nospec-branch.h>
+ 
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -167,11 +167,20 @@
+ .Lskip_rsb_\@:
+ .endm
+ 
++/*
++ * The CALL to srso_alias_untrain_ret() must be patched in directly at
++ * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
++ * must be the target of a CALL instruction instead of indirectly
++ * jumping to a wrapper which then calls it. Therefore, this macro is
++ * called outside of __UNTRAIN_RET below, for the time being, before the
++ * kernel can support nested alternatives with arbitrary nesting.
++ */
++.macro CALL_UNTRAIN_RET
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_UNTRAIN_RET	"call entry_untrain_ret"
+-#else
+-#define CALL_UNTRAIN_RET	""
++	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
++		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ #endif
++.endm
+ 
+ /*
+  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
+@@ -188,9 +197,8 @@
+ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
+ 	defined(CONFIG_CPU_SRSO)
+ 	ANNOTATE_UNRET_END
+-	ALTERNATIVE_2 "",						\
+-		      CALL_UNTRAIN_RET, X86_FEATURE_UNRET,		\
+-		      "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
++	CALL_UNTRAIN_RET
++	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
+ .endm
+ 
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -252,9 +252,7 @@ SYM_CODE_START(srso_return_thunk)
+ SYM_CODE_END(srso_return_thunk)
+ 
+ SYM_FUNC_START(entry_untrain_ret)
+-	ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
+-		      "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
+-		      "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++	ALTERNATIVE "jmp retbleed_untrain_ret", "jmp srso_untrain_ret", X86_FEATURE_SRSO
+ SYM_FUNC_END(entry_untrain_ret)
+ __EXPORT_THUNK(entry_untrain_ret)
+ 
diff --git a/queue-6.1/x86-retpoline-do-the-necessary-fixup-to-the-zen3-4-srso-return-thunk-for-srso.patch b/queue-6.1/x86-retpoline-do-the-necessary-fixup-to-the-zen3-4-srso-return-thunk-for-srso.patch
new file mode 100644
index 00000000000..93ec3fe46d6
--- /dev/null
+++ b/queue-6.1/x86-retpoline-do-the-necessary-fixup-to-the-zen3-4-srso-return-thunk-for-srso.patch
@@ -0,0 +1,52 @@
+From 0e110732473e14d6520e49d75d2c88ef7d46fe67 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Tue, 2 Apr 2024 16:05:49 +0200
+Subject: x86/retpoline: Do the necessary fixup to the Zen3/4 srso return thunk for !SRSO
+
+From: Borislav Petkov (AMD)
+
+commit 0e110732473e14d6520e49d75d2c88ef7d46fe67 upstream.
+
+The srso_alias_untrain_ret() dummy thunk in the !CONFIG_MITIGATION_SRSO
+case is there only for the alternative in CALL_UNTRAIN_RET to have
+a symbol to resolve.
+
+However, testing with kernels which don't have CONFIG_MITIGATION_SRSO
+enabled leads to the warning in patch_return() firing:
+
+  missing return thunk: srso_alias_untrain_ret+0x0/0x10-0x0: eb 0e 66 66 2e
+  WARNING: CPU: 0 PID: 0 at arch/x86/kernel/alternative.c:826 apply_returns (arch/x86/kernel/alternative.c:826
+
+Put in a plain "ret" there so that gcc doesn't put a return thunk in
+its place, which is special and gets checked.
+
+In addition:
+
+  ERROR: modpost: "srso_alias_untrain_ret" [arch/x86/kvm/kvm-amd.ko] undefined!
+  make[2]: *** [scripts/Makefile.modpost:145: Module.symvers] Error 1
+  make[1]: *** [/usr/src/linux-6.8.3/Makefile:1873: modpost] Error 2
+  make: *** [Makefile:240: __sub-make] Error 2
+
+since !SRSO builds would use the dummy return thunk, as reported by
+petr.pisar@atlas.cz, https://bugzilla.kernel.org/show_bug.cgi?id=218679.
+
+Reported-by: kernel test robot
+Closes: https://lore.kernel.org/oe-lkp/202404020901.da75a60f-oliver.sang@intel.com
+Signed-off-by: Borislav Petkov (AMD)
+Link: https://lore.kernel.org/all/202404020901.da75a60f-oliver.sang@intel.com/
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/lib/retpoline.S |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -110,6 +110,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_
+ 	ret
+ 	int3
+ SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+ #endif
+ 
+ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
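
A note on the mechanism behind the two patches above: ALTERNATIVE_2(orig,
insn1, feat1, insn2, feat2) rewrites the patch site once at boot with the
last listed replacement whose CPU feature bit is set, and leaves the
original bytes in place otherwise. The userspace C sketch below is only
an analogy for that selection logic; the function names mirror the kernel
symbols but are stand-ins, and the real kernel makes the choice by
rewriting instruction bytes at boot, not by branching at runtime.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the X86_FEATURE_* bits tested by ALTERNATIVE_2. */
	static bool cpu_has_unret;      /* X86_FEATURE_UNRET */
	static bool cpu_has_srso_alias; /* X86_FEATURE_SRSO_ALIAS */

	/* Stand-ins for the real untraining routines in retpoline.S. */
	static void entry_untrain_ret(void)      { puts("Zen1/2 untrain path"); }
	static void srso_alias_untrain_ret(void) { puts("Zen3/4 untrain path"); }

	/*
	 * Models the reworked CALL_UNTRAIN_RET macro: on Zen3/4 the
	 * srso_alias_untrain_ret() routine is now reached directly from
	 * the patch site (the later alternative wins when both feature
	 * bits are set) instead of via a JMP from the entry_untrain_ret()
	 * wrapper, which is what the first patch requires.
	 */
	static void call_untrain_ret(void)
	{
		if (cpu_has_srso_alias)
			srso_alias_untrain_ret();
		else if (cpu_has_unret)
			entry_untrain_ret();
		/* neither feature set: the site stays as NOPs */
	}

	int main(void)
	{
		cpu_has_srso_alias = true;	/* pretend we booted on Zen3/4 */
		call_untrain_ret();
		return 0;
	}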