6.8-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 1 Apr 2024 10:57:39 +0000 (12:57 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 1 Apr 2024 10:57:39 +0000 (12:57 +0200)
added patches:
crash-use-macro-to-add-crashk_res-into-iomem-early-for-specific-arch.patch
x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch

queue-6.8/crash-use-macro-to-add-crashk_res-into-iomem-early-for-specific-arch.patch [new file with mode: 0644]
queue-6.8/series
queue-6.8/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch [new file with mode: 0644]

diff --git a/queue-6.8/crash-use-macro-to-add-crashk_res-into-iomem-early-for-specific-arch.patch b/queue-6.8/crash-use-macro-to-add-crashk_res-into-iomem-early-for-specific-arch.patch
new file mode 100644 (file)
index 0000000..1472c87
--- /dev/null
+++ b/queue-6.8/crash-use-macro-to-add-crashk_res-into-iomem-early-for-specific-arch.patch
@@ -0,0 +1,98 @@
+From 32fbe5246582af4f611ccccee33fd6e559087252 Mon Sep 17 00:00:00 2001
+From: Baoquan He <bhe@redhat.com>
+Date: Mon, 25 Mar 2024 09:50:50 +0800
+Subject: crash: use macro to add crashk_res into iomem early for specific arch
+
+From: Baoquan He <bhe@redhat.com>
+
+commit 32fbe5246582af4f611ccccee33fd6e559087252 upstream.
+
+There are regression reports[1][2] that the crashkernel region on x86_64
+sometimes can't be added into the iomem tree.  This causes kdump loading
+to fail later.
+
+This happened after commit 4a693ce65b18 ("kdump: defer the insertion of
+crashkernel resources") was merged.
+
+Even though these reported issues proved to be caused by other
+components and were merely exposed after the above commit was applied, I
+would still like to keep crashk_res and crashk_low_res being added into
+iomem early, as before, because the early adding has always been there
+on x86_64 and has been working very well.  For the safety of kdump,
+let's change it back.
+
+Here, add a macro HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY so that only
+architectures defining the macro get the early adding of
+crashk_res/_low_res into iomem.  Then define
+HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY on x86 to enable it.
+
+Note: In reserve_crashkernel_low(), there's a remnant of crashk_low_res
+handling which was mistakenly added back in commit 85fcde402db1 ("kexec:
+split crashkernel reservation code out from crash_core.c").
+
+[1]
+[PATCH V2] x86/kexec: do not update E820 kexec table for setup_data
+https://lore.kernel.org/all/Zfv8iCL6CT2JqLIC@darkstar.users.ipa.redhat.com/T/#u
+
+[2]
+Question about Address Range Validation in Crash Kernel Allocation
+https://lore.kernel.org/all/4eeac1f733584855965a2ea62fa4da58@huawei.com/T/#u
+
+Link: https://lkml.kernel.org/r/ZgDYemRQ2jxjLkq+@MiWiFi-R3L-srv
+Fixes: 4a693ce65b18 ("kdump: defer the insertion of crashkernel resources")
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Huacai Chen <chenhuacai@loongson.cn>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Jiri Bohac <jbohac@suse.cz>
+Cc: Li Huafei <lihuafei1@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Baoquan He <bhe@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/crash_core.h |    2 ++
+ kernel/crash_core.c               |    8 ++++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/x86/include/asm/crash_core.h
++++ b/arch/x86/include/asm/crash_core.h
+@@ -39,4 +39,6 @@ static inline unsigned long crash_low_si
+ #endif
+ }
++#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
++
+ #endif /* _X86_CRASH_CORE_H */
+--- a/kernel/crash_core.c
++++ b/kernel/crash_core.c
+@@ -376,6 +376,9 @@ static int __init reserve_crashkernel_lo
+       crashk_low_res.start = low_base;
+       crashk_low_res.end   = low_base + low_size - 1;
++#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
++      insert_resource(&iomem_resource, &crashk_low_res);
++#endif
+ #endif
+       return 0;
+ }
+@@ -457,8 +460,12 @@ retry:
+       crashk_res.start = crash_base;
+       crashk_res.end = crash_base + crash_size - 1;
++#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
++      insert_resource(&iomem_resource, &crashk_res);
++#endif
+ }
++#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+ static __init int insert_crashkernel_resources(void)
+ {
+       if (crashk_res.start < crashk_res.end)
+@@ -471,6 +478,7 @@ static __init int insert_crashkernel_res
+ }
+ early_initcall(insert_crashkernel_resources);
+ #endif
++#endif
+ int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+                         void **addr, unsigned long *sz)
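The opt-in pattern described in the commit message above, taken out of diff form for readability. This is a sketch only: the helper name reserve_crash_region_sketch() is made up for illustration, while crashk_res, iomem_resource, insert_resource() and early_initcall() are the kernel's real symbols.

    /* arch opt-in, e.g. arch/x86/include/asm/crash_core.h */
    #define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY

    /* generic reservation path (sketch, not the exact kernel code) */
    static void __init reserve_crash_region_sketch(unsigned long base,
                                                   unsigned long size)
    {
            crashk_res.start = base;
            crashk_res.end   = base + size - 1;
    #ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
            /* opted-in arches get the region into /proc/iomem right away */
            insert_resource(&iomem_resource, &crashk_res);
    #endif
    }

    #ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
    /* everyone else keeps the deferred insertion from 4a693ce65b18 */
    static int __init insert_crashkernel_resources(void)
    {
            if (crashk_res.start < crashk_res.end)
                    insert_resource(&iomem_resource, &crashk_res);
            return 0;
    }
    early_initcall(insert_crashkernel_resources);
    #endif
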
diff --git a/queue-6.8/series b/queue-6.8/series
index 9d558eff8f70652be3e8e9f52215b89d5e72d323..a3f596dcb43876d9442fcda8477f217ac716e014 100644 (file)
--- a/queue-6.8/series
+++ b/queue-6.8/series
@@ -384,3 +384,5 @@ scsi-qla2xxx-fix-double-free-of-the-ha-vp_map-pointer.patch
 scsi-qla2xxx-fix-double-free-of-fcport.patch
 scsi-qla2xxx-change-debug-message-during-driver-unload.patch
 scsi-qla2xxx-delay-i-o-abort-on-pci-error.patch
+x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
+crash-use-macro-to-add-crashk_res-into-iomem-early-for-specific-arch.patch
diff --git a/queue-6.8/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch b/queue-6.8/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
new file mode 100644 (file)
index 0000000..f72e009
--- /dev/null
+++ b/queue-6.8/x86-bugs-fix-the-srso-mitigation-on-zen3-4.patch
@@ -0,0 +1,125 @@
+From 4535e1a4174c4111d92c5a9a21e542d232e0fcaa Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Thu, 28 Mar 2024 13:59:05 +0100
+Subject: x86/bugs: Fix the SRSO mitigation on Zen3/4
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 4535e1a4174c4111d92c5a9a21e542d232e0fcaa upstream.
+
+The original version of the mitigation would patch in the calls to the
+untraining routines directly.  That is, the alternative() in UNTRAIN_RET
+would patch in the CALL to srso_alias_untrain_ret() directly.
+
+However, even though commit e7c25c441e9e ("x86/cpu: Cleanup the untrain
+mess") meant well in trying to clean up the situation, for micro-
+architectural reasons the untraining routine srso_alias_untrain_ret()
+must be the target of a CALL instruction and not of a JMP instruction,
+as is currently the case.
+
+Reshuffle the alternative macros to accomplish that.
+
+Fixes: e7c25c441e9e ("x86/cpu: Cleanup the untrain mess")
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/asm-prototypes.h |    1 +
+ arch/x86/include/asm/nospec-branch.h  |   21 ++++++++++++++++-----
+ arch/x86/lib/retpoline.S              |   11 ++++++-----
+ 3 files changed, 23 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -13,6 +13,7 @@
+ #include <asm/preempt.h>
+ #include <asm/asm.h>
+ #include <asm/gsseg.h>
++#include <asm/nospec-branch.h>
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -271,11 +271,20 @@
+ .Lskip_rsb_\@:
+ .endm
++/*
++ * The CALL to srso_alias_untrain_ret() must be patched in directly at
++ * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
++ * must be the target of a CALL instruction instead of indirectly
++ * jumping to a wrapper which then calls it. Therefore, this macro is
++ * called outside of __UNTRAIN_RET below, for the time being, before the
++ * kernel can support nested alternatives with arbitrary nesting.
++ */
++.macro CALL_UNTRAIN_RET
+ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+-#define CALL_UNTRAIN_RET      "call entry_untrain_ret"
+-#else
+-#define CALL_UNTRAIN_RET      ""
++      ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
++                        "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ #endif
++.endm
+ /*
+  * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
+@@ -291,8 +300,8 @@
+ .macro __UNTRAIN_RET ibpb_feature, call_depth_insns
+ #if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
+       VALIDATE_UNRET_END
+-      ALTERNATIVE_3 "",                                               \
+-                    CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
++      CALL_UNTRAIN_RET
++      ALTERNATIVE_2 "",                                               \
+                     "call entry_ibpb", \ibpb_feature,                 \
+                    __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
+ #endif
+@@ -351,6 +360,8 @@ extern void retbleed_return_thunk(void);
+ static inline void retbleed_return_thunk(void) {}
+ #endif
++extern void srso_alias_untrain_ret(void);
++
+ #ifdef CONFIG_CPU_SRSO
+ extern void srso_return_thunk(void);
+ extern void srso_alias_return_thunk(void);
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -163,6 +163,7 @@ SYM_CODE_START_NOALIGN(srso_alias_untrai
+       lfence
+       jmp srso_alias_return_thunk
+ SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+       .popsection
+       .pushsection .text..__x86.rethunk_safe
+@@ -224,10 +225,12 @@ SYM_CODE_START(srso_return_thunk)
+ SYM_CODE_END(srso_return_thunk)
+ #define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
+-#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
+ #else /* !CONFIG_CPU_SRSO */
+ #define JMP_SRSO_UNTRAIN_RET "ud2"
+-#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
++/* Dummy for the alternative in CALL_UNTRAIN_RET. */
++SYM_CODE_START(srso_alias_untrain_ret)
++      RET
++SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif /* CONFIG_CPU_SRSO */
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+@@ -319,9 +322,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
+ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+ SYM_FUNC_START(entry_untrain_ret)
+-      ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET,                         \
+-                    JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO,           \
+-                    JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
++      ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
+ SYM_FUNC_END(entry_untrain_ret)
+ __EXPORT_THUNK(entry_untrain_ret)
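Pieced together from the nospec-branch.h hunks above, the net shape of the untraining macros after this fix. This is a readability sketch reconstructed from the diff, not a verbatim copy of the header:

    /* The SRSO-alias untraining must be reached via a CALL patched in
     * at the use site, so it gets its own feature-patched alternative
     * instead of being a JMP target inside entry_untrain_ret(). */
    .macro CALL_UNTRAIN_RET
    #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
            ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
                              "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
    #endif
    .endm

    .macro __UNTRAIN_RET ibpb_feature, call_depth_insns
    #if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
            VALIDATE_UNRET_END
            CALL_UNTRAIN_RET        /* direct CALL, as the uarch requires */
            ALTERNATIVE_2 "",                                               \
                          "call entry_ibpb", \ibpb_feature,                 \
                          __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
    #endif
    .endm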