drop the x86 6.1 patches, will add back later in a complete set
author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Mon, 21 Aug 2023 13:50:50 +0000 (15:50 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Mon, 21 Aug 2023 13:50:50 +0000 (15:50 +0200)
queue-6.1/series
queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch [deleted file]
queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch [deleted file]
queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-d.patch [deleted file]
queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-config.patch [deleted file]
queue-6.1/x86-static_call-fix-__static_call_fixup.patch [deleted file]

diff --git a/queue-6.1/series b/queue-6.1/series
index a9e0476fd10ab36cb3d2aa328e80bd203f398e6f..699278928a3a06c6f87e49e84cfa4322b3945cba 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -146,13 +146,8 @@ asoc-meson-axg-tdm-formatter-fix-channel-slot-alloca.patch
 alsa-hda-realtek-add-quirks-for-hp-g11-laptops.patch
 soc-aspeed-uart-routing-use-__sysfs_match_string.patch
 soc-aspeed-socinfo-add-kfree-for-kstrdup.patch
-x86-srso-disable-the-mitigation-on-unaffected-config.patch
-x86-cpu-fix-__x86_return_thunk-symbol-type.patch
-x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
 alsa-hda-realtek-remodified-3k-pull-low-procedure.patch
 riscv-uaccess-return-the-number-of-bytes-effectively.patch
-x86-static_call-fix-__static_call_fixup.patch
-x86-srso-correct-the-mitigation-status-when-smt-is-d.patch
 serial-8250-fix-oops-for-port-pm-on-uart_change_pm.patch
 alsa-usb-audio-add-support-for-mythware-xa001au-capture-and-playback-interfaces.patch
 cifs-release-folio-lock-on-fscache-read-hit.patch
diff --git a/queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch b/queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch
deleted file mode 100644
index b7ef978..0000000
--- a/queue-6.1/x86-cpu-fix-__x86_return_thunk-symbol-type.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From 824485511e11da5e7c94033dab59fc541b266e3c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Aug 2023 13:44:27 +0200
-Subject: x86/cpu: Fix __x86_return_thunk symbol type
-
-From: Peter Zijlstra <peterz@infradead.org>
-
-[ Upstream commit 77f67119004296a9b2503b377d610e08b08afc2a ]
-
-Commit
-
-  fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
-
-reimplemented __x86_return_thunk with a mix of SYM_FUNC_START and
-SYM_CODE_END, which is not a sane combination.
-
-Since nothing should ever actually 'CALL' this, make it consistently
-CODE.
-
-Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
-Link: https://lore.kernel.org/r/20230814121148.571027074@infradead.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/lib/retpoline.S | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
-index 30e76fab678a5..591d4a2419b55 100644
---- a/arch/x86/lib/retpoline.S
-+++ b/arch/x86/lib/retpoline.S
-@@ -207,7 +207,9 @@ SYM_CODE_END(srso_safe_ret)
- SYM_FUNC_END(srso_untrain_ret)
-__EXPORT_THUNK(srso_untrain_ret)
-
--SYM_FUNC_START(__x86_return_thunk)
-+SYM_CODE_START(__x86_return_thunk)
-+      UNWIND_HINT_FUNC
-+      ANNOTATE_NOENDBR
-       ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
-                       "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
-       int3
--- 
-2.40.1
-
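The typing issue the patch above fixes is that SYM_FUNC_START marks a callable, FUNC-typed symbol, while __x86_return_thunk is only ever jumped to and so belongs in the CODE class. A rough userspace analogue of the two symbol classes, as a minimal GNU C sketch with GAS directives (illustrative only, an assumption of this note; the kernel's SYM_* macros additionally manage ENDBR and unwind hints, which plain directives do not):

__asm__(
	".text\n"
	/* FUNC-style symbol: typed @function, sized, meant to be CALLed. */
	".globl func_sym\n"
	".type func_sym, @function\n"
	"func_sym:\n"
	"	ret\n"
	".size func_sym, . - func_sym\n"
	/* CODE-style symbol: a bare label, only ever reached via JMP. */
	".globl code_sym\n"
	"code_sym:\n"
	"	jmp func_sym\n"
);

extern void func_sym(void);

int main(void)
{
	func_sym();	/* calling the FUNC-typed symbol is fine */
	return 0;	/* code_sym is deliberately never CALLed */
}

Built with gcc on x86-64, readelf -s shows func_sym as FUNC and code_sym as NOTYPE, which is the distinction SYM_FUNC_START and SYM_CODE_START encode in the kernel.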
diff --git a/queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch b/queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
deleted file mode 100644
index 327c693..0000000
--- a/queue-6.1/x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From f6d1cb7cfaefec06c3b26fc414351f9700385dbc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Aug 2023 13:44:28 +0200
-Subject: x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
-
-From: Peter Zijlstra <peterz@infradead.org>
-
-[ Upstream commit af023ef335f13c8b579298fc432daeef609a9e60 ]
-
-  vmlinux.o: warning: objtool: srso_untrain_ret() falls through to next function __x86_return_skl()
-  vmlinux.o: warning: objtool: __x86_return_thunk() falls through to next function __x86_return_skl()
-
-This is because these functions (can) end with CALL, which objtool
-does not consider a terminating instruction. Therefore, replace the
-INT3 instruction (which is a non-fatal trap) with UD2 (which is a
-fatal trap).
-
-This indicates execution will not continue past this point.
-
-Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
-Link: https://lore.kernel.org/r/20230814121148.637802730@infradead.org
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/lib/retpoline.S | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
-index 591d4a2419b55..271f6560448de 100644
---- a/arch/x86/lib/retpoline.S
-+++ b/arch/x86/lib/retpoline.S
-@@ -202,7 +202,7 @@ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
-       int3
-       lfence
-       call srso_safe_ret
--      int3
-+      ud2
- SYM_CODE_END(srso_safe_ret)
- SYM_FUNC_END(srso_untrain_ret)
- __EXPORT_THUNK(srso_untrain_ret)
-@@ -212,7 +212,7 @@ SYM_CODE_START(__x86_return_thunk)
-       ANNOTATE_NOENDBR
-       ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
-                       "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
--      int3
-+      ud2
- SYM_CODE_END(__x86_return_thunk)
- EXPORT_SYMBOL(__x86_return_thunk)
-
--- 
-2.40.1
-
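The INT3-versus-UD2 distinction above is observable from userspace: INT3 raises the resumable debug trap SIGTRAP, while UD2 raises SIGILL, and execution cannot sensibly continue past it, which is why objtool can treat it as a terminating instruction. A minimal sketch, assuming x86-64 Linux with gcc or clang (handler and messages are illustrative):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_sigill(int sig)
{
	(void)sig;
	puts("ud2 -> SIGILL: control never resumes past the trap");
	exit(0);		/* returning would just re-execute the ud2 */
}

int main(void)
{
	signal(SIGILL, on_sigill);
	__asm__ volatile("ud2");	/* the fatal trap the patch switches to */
	puts("unreachable");		/* never printed */
	return 0;
}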
diff --git a/queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-d.patch b/queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-d.patch
deleted file mode 100644
index 7350caf..0000000
--- a/queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-d.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From b705656ee1f3dc6f3d0cc32dfa341beaf6c8feee Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 15 Aug 2023 11:53:13 +0200
-Subject: x86/srso: Correct the mitigation status when SMT is disabled
-
-From: Borislav Petkov (AMD) <bp@alien8.de>
-
-[ Upstream commit 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 ]
-
-Specify how SRSO is mitigated when SMT is disabled. Also, correct the
-SMT check for that.
-
-Fixes: e9fbc47b818b ("x86/srso: Disable the mitigation on unaffected configurations")
-Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
-Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
-Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
-Link: https://lore.kernel.org/r/20230814200813.p5czl47zssuej7nv@treble
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/kernel/cpu/bugs.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 97a4819ad8386..f07947f37dfd0 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -2388,8 +2388,7 @@ static void __init srso_select_mitigation(void)
-                * Zen1/2 with SMT off aren't vulnerable after the right
-                * IBPB microcode has been applied.
-                */
--              if ((boot_cpu_data.x86 < 0x19) &&
--                  (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
-+              if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
-                       setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
-                       return;
-               }
-@@ -2675,7 +2674,7 @@ static ssize_t gds_show_state(char *buf)
- static ssize_t srso_show_state(char *buf)
- {
-       if (boot_cpu_has(X86_FEATURE_SRSO_NO))
--              return sysfs_emit(buf, "Not affected\n");
-+              return sysfs_emit(buf, "Mitigation: SMT disabled\n");
-
-       return sysfs_emit(buf, "%s%s\n",
-                         srso_strings[srso_mitigation],
--- 
-2.40.1
-
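The predicate change above narrows when SRSO_NO is forced: cpu_smt_possible() is false only when SMT can never come back (force-disabled or unsupported), whereas the dropped CPU_SMT_DISABLED case also covered runtime-disabled SMT that could be re-enabled later. A self-contained sketch of the old and new checks; the enum and cpu_smt_possible() are modeled on kernel/cpu.c of this era and are an assumption of this note, not the kernel's exact code:

#include <stdbool.h>
#include <stdio.h>

enum cpuhp_smt_control {
	CPU_SMT_ENABLED,
	CPU_SMT_DISABLED,	/* disabled at runtime; may be re-enabled */
	CPU_SMT_FORCE_DISABLED,	/* nosmt=force; cannot be re-enabled */
	CPU_SMT_NOT_SUPPORTED,
};

static enum cpuhp_smt_control cpu_smt_control = CPU_SMT_DISABLED;

static bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
	       cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}

int main(void)
{
	/* Old check: runtime-disabled SMT also counted as safe. */
	bool old_check = !cpu_smt_possible() ||
			 cpu_smt_control == CPU_SMT_DISABLED;
	/* New check: only permanently impossible SMT counts. */
	bool new_check = !cpu_smt_possible();

	printf("old=%d new=%d\n", old_check, new_check);	/* old=1 new=0 */
	return 0;
}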
diff --git a/queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-config.patch b/queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-config.patch
deleted file mode 100644
index 13de215..0000000
--- a/queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-config.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From b7b59c5c2bb9ec66b57428e5594b89331f598960 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sun, 13 Aug 2023 12:39:34 +0200
-Subject: x86/srso: Disable the mitigation on unaffected configurations
-
-From: Borislav Petkov (AMD) <bp@alien8.de>
-
-[ Upstream commit e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 ]
-
-Skip the srso cmd line parsing, which is not needed on Zen1/2 with SMT
-disabled and with the proper microcode applied (the latter should be the
-case anyway), as those configurations are not affected.
-
-Fixes: 5a15d8348881 ("x86/srso: Tie SBPB bit setting to microcode patch detection")
-Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
-Link: https://lore.kernel.org/r/20230813104517.3346-1-bp@alien8.de
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/kernel/cpu/bugs.c | 7 ++++++-
- 1 file changed, 6 insertions(+), 1 deletion(-)
-
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index d98f33ea57e47..97a4819ad8386 100644
---- a/arch/x86/kernel/cpu/bugs.c
-+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -2389,8 +2389,10 @@ static void __init srso_select_mitigation(void)
-                * IBPB microcode has been applied.
-                */
-               if ((boot_cpu_data.x86 < 0x19) &&
--                  (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
-+                  (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
-                       setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
-+                      return;
-+              }
-       }
-
-       if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-@@ -2672,6 +2674,9 @@ static ssize_t gds_show_state(char *buf)
-
- static ssize_t srso_show_state(char *buf)
- {
-+      if (boot_cpu_has(X86_FEATURE_SRSO_NO))
-+              return sysfs_emit(buf, "Not affected\n");
-+
-       return sysfs_emit(buf, "%s%s\n",
-                         srso_strings[srso_mitigation],
-                         (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
--- 
-2.40.1
-
diff --git a/queue-6.1/x86-static_call-fix-__static_call_fixup.patch b/queue-6.1/x86-static_call-fix-__static_call_fixup.patch
deleted file mode 100644
index 2fbd429..0000000
--- a/queue-6.1/x86-static_call-fix-__static_call_fixup.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From 283cdb0bcc62097bfda78afe28ac1867ddfa49bc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 16 Aug 2023 12:44:19 +0200
-Subject: x86/static_call: Fix __static_call_fixup()
-
-From: Peter Zijlstra <peterz@infradead.org>
-
-[ Upstream commit 54097309620ef0dc2d7083783dc521c6a5fef957 ]
-
-Christian reported spurious module load crashes after some of Song's
-module memory layout patches.
-
-Turns out that if the very last instruction on the very last page of the
-module is a 'JMP __x86_return_thunk' then __static_call_fixup() will
-trip a fault and die.
-
-And while the module rework made this slightly more likely to happen,
-it's always been possible.
-
-Fixes: ee88d363d156 ("x86,static_call: Use alternative RET encoding")
-Reported-by: Christian Bricart <christian@bricart.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
-Link: https://lkml.kernel.org/r/20230816104419.GA982867@hirez.programming.kicks-ass.net
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/kernel/static_call.c | 13 +++++++++++++
- 1 file changed, 13 insertions(+)
-
-diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
-index a9b54b795ebff..3fbb491688275 100644
---- a/arch/x86/kernel/static_call.c
-+++ b/arch/x86/kernel/static_call.c
-@@ -184,6 +184,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
-  */
- bool __static_call_fixup(void *tramp, u8 op, void *dest)
- {
-+      unsigned long addr = (unsigned long)tramp;
-+      /*
-+       * Not all .return_sites are a static_call trampoline (most are not).
-+       * Check if the 3 bytes after the return are still kernel text, if not,
-+       * then this definitely is not a trampoline and we need not worry
-+       * further.
-+       *
-+       * This avoids the memcmp() below tripping over pagefaults etc..
-+       */
-+      if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
-+          !kernel_text_address(addr + 7))
-+              return false;
-+
-       if (memcmp(tramp+5, tramp_ud, 3)) {
-               /* Not a trampoline site, not our problem. */
-               return false;
--- 
-2.40.1
-
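The guard added in this last patch works because __static_call_fixup() only inspects bytes up to tramp+7: if tramp and tramp+7 land on the same page, the memcmp() cannot fault on an unmapped following page, and only when the span crosses a page boundary must the next page be confirmed as kernel text. A standalone sketch of that arithmetic, assuming 4 KiB pages and with kernel_text_address() mocked:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed */

/* Mock: pretend only page 0 is mapped kernel text. */
static bool kernel_text_address(uintptr_t addr)
{
	return (addr >> PAGE_SHIFT) == 0;
}

/* True when the 8 bytes at addr are safe to read: either they stay on
 * one page, or the page holding addr+7 is still kernel text. */
static bool bytes_safe_to_read(uintptr_t addr)
{
	if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
	    !kernel_text_address(addr + 7))
		return false;
	return true;
}

int main(void)
{
	/* 0xff9 + 7 = 0x1000 crosses into page 1, which is not text. */
	printf("%d\n", bytes_safe_to_read(0xff9));	/* 0: would fault */
	/* 0xf00 + 7 = 0xf07 stays on page 0. */
	printf("%d\n", bytes_safe_to_read(0xf00));	/* 1: safe */
	return 0;
}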