git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Aug 2023 15:44:05 +0000 (17:44 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Aug 2023 15:44:05 +0000 (17:44 +0200)
added patches:
x86-cpu-clean-up-srso-return-thunk-mess.patch
x86-cpu-rename-original-retbleed-methods.patch
x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch

queue-6.1/series
queue-6.1/x86-cpu-clean-up-srso-return-thunk-mess.patch [new file with mode: 0644]
queue-6.1/x86-cpu-rename-original-retbleed-methods.patch [new file with mode: 0644]
queue-6.1/x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch [new file with mode: 0644]

index b18d210f207e7b712e9b79a7ebdc8151d9253259..3f000f2d22b8d65773db6ad9e25a3527f6e2b9c2 100644 (file)
@@ -173,3 +173,6 @@ netfilter-set-default-timeout-to-3-secs-for-sctp-shutdown-send-and-recv-state.pa
 x86-cpu-fix-__x86_return_thunk-symbol-type.patch
 x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
 x86-alternative-make-custom-return-thunk-unconditional.patch
+x86-cpu-clean-up-srso-return-thunk-mess.patch
+x86-cpu-rename-original-retbleed-methods.patch
+x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch
diff --git a/queue-6.1/x86-cpu-clean-up-srso-return-thunk-mess.patch b/queue-6.1/x86-cpu-clean-up-srso-return-thunk-mess.patch
new file mode 100644 (file)
index 0000000..9c0e17b
--- /dev/null
@@ -0,0 +1,321 @@
+From d43490d0ab824023e11d0b57d0aeec17a6e0ca13 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:31 +0200
+Subject: x86/cpu: Clean up SRSO return thunk mess
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit d43490d0ab824023e11d0b57d0aeec17a6e0ca13 upstream.
+
+Use the existing configurable return thunk. There is absolutely no
+justification for having created this __x86_return_thunk alternative.
+
+To clarify, the whole thing looks like:
+
+Zen3/4 does:
+
+  srso_alias_untrain_ret:
+         nop2
+         lfence
+         jmp srso_alias_return_thunk
+         int3
+
+  srso_alias_safe_ret: // aliases srso_alias_untrain_ret just so
+         add $8, %rsp
+         ret
+         int3
+
+  srso_alias_return_thunk:
+         call srso_alias_safe_ret
+         ud2
+
+While Zen1/2 does:
+
+  srso_untrain_ret:
+         movabs $foo, %rax
+         lfence
+         call srso_safe_ret           (jmp srso_return_thunk ?)
+         int3
+
+  srso_safe_ret: // embedded in movabs instruction
+         add $8,%rsp
+          ret
+          int3
+
+  srso_return_thunk:
+         call srso_safe_ret
+         ud2
+
+While retbleed does:
+
+  zen_untrain_ret:
+         test $0xcc, %bl
+         lfence
+         jmp zen_return_thunk
+          int3
+
+  zen_return_thunk: // embedded in the test instruction
+         ret
+          int3
+
+Where Zen1/2 flush the BTB entry using the instruction decoder trick
+(test, movabs), Zen3/4 use BTB aliasing. SRSO adds a return sequence
+(srso_safe_ret()) which forces the function return instruction to
+speculate into a trap (UD2).  This RET will then mispredict and
+execution will continue at the return site read from the top of the
+stack.
+
+Pick one of three options at boot (every function can only ever return
+once).
+
+  [ bp: Fixup commit message uarch details and add them in a comment in
+    the code too. Add a comment about the srso_select_mitigation()
+    dependency on retbleed_select_mitigation(). Add moar ifdeffery for
+    32-bit builds. Add a dummy srso_untrain_ret_alias() definition for
+    32-bit alternatives needing the symbol. ]
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.842775684@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |    5 +++
+ arch/x86/kernel/cpu/bugs.c           |   17 ++++++++--
+ arch/x86/kernel/vmlinux.lds.S        |    4 +-
+ arch/x86/lib/retpoline.S             |   58 +++++++++++++++++++++++++----------
+ tools/objtool/arch/x86/decode.c      |    2 -
+ 5 files changed, 64 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -216,9 +216,14 @@ extern void __x86_return_thunk(void);
+ static inline void __x86_return_thunk(void) {}
+ #endif
++extern void zen_return_thunk(void);
++extern void srso_return_thunk(void);
++extern void srso_alias_return_thunk(void);
++
+ extern void zen_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_untrain_ret_alias(void);
++
+ extern void entry_ibpb(void);
+ #ifdef CONFIG_RETPOLINE
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -166,8 +166,13 @@ void __init cpu_select_mitigations(void)
+       md_clear_select_mitigation();
+       srbds_select_mitigation();
+       l1d_flush_select_mitigation();
+-      gds_select_mitigation();
++
++      /*
++       * srso_select_mitigation() depends and must run after
++       * retbleed_select_mitigation().
++       */
+       srso_select_mitigation();
++      gds_select_mitigation();
+ }
+ /*
+@@ -1015,6 +1020,9 @@ do_cmd_auto:
+               setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+               setup_force_cpu_cap(X86_FEATURE_UNRET);
++              if (IS_ENABLED(CONFIG_RETHUNK))
++                      x86_return_thunk = zen_return_thunk;
++
+               if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+                   boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+                       pr_err(RETBLEED_UNTRAIN_MSG);
+@@ -2422,10 +2430,13 @@ static void __init srso_select_mitigatio
+                        */
+                       setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+-                      if (boot_cpu_data.x86 == 0x19)
++                      if (boot_cpu_data.x86 == 0x19) {
+                               setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+-                      else
++                              x86_return_thunk = srso_alias_return_thunk;
++                      } else {
+                               setup_force_cpu_cap(X86_FEATURE_SRSO);
++                              x86_return_thunk = srso_return_thunk;
++                      }
+                       srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+               } else {
+                       pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -507,8 +507,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+            "fixed_percpu_data is not at start of per-cpu area");
+ #endif
+- #ifdef CONFIG_RETHUNK
+-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
++#ifdef CONFIG_RETHUNK
++. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -94,22 +94,27 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+       .section .text.__x86.rethunk_untrain
+ SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++      UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+       ASM_NOP2
+       lfence
+-      jmp __x86_return_thunk
++      jmp srso_alias_return_thunk
+ SYM_FUNC_END(srso_untrain_ret_alias)
+ __EXPORT_THUNK(srso_untrain_ret_alias)
+       .section .text.__x86.rethunk_safe
++#else
++/* dummy definition for alternatives */
++SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++      ANNOTATE_UNRET_SAFE
++      ret
++      int3
++SYM_FUNC_END(srso_untrain_ret_alias)
+ #endif
+-/* Needs a definition for the __x86_return_thunk alternative below. */
+ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+-#ifdef CONFIG_CPU_SRSO
+       add $8, %_ASM_SP
+       UNWIND_HINT_FUNC
+-#endif
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
+@@ -117,9 +122,16 @@ SYM_FUNC_END(srso_safe_ret_alias)
+       .section .text.__x86.return_thunk
++SYM_CODE_START(srso_alias_return_thunk)
++      UNWIND_HINT_FUNC
++      ANNOTATE_NOENDBR
++      call srso_safe_ret_alias
++      ud2
++SYM_CODE_END(srso_alias_return_thunk)
++
+ /*
+  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
+  *    alignment within the BTB.
+  * 2) The instruction at zen_untrain_ret must contain, and not
+  *    end with, the 0xc3 byte of the RET.
+@@ -127,7 +139,7 @@ SYM_FUNC_END(srso_safe_ret_alias)
+  *    from re-poisioning the BTB prediction.
+  */
+       .align 64
+-      .skip 64 - (__ret - zen_untrain_ret), 0xcc
++      .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
+ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       ANNOTATE_NOENDBR
+       /*
+@@ -135,16 +147,16 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL,
+        *
+        *   TEST $0xcc, %bl
+        *   LFENCE
+-       *   JMP __x86_return_thunk
++       *   JMP zen_return_thunk
+        *
+        * Executing the TEST instruction has a side effect of evicting any BTB
+        * prediction (potentially attacker controlled) attached to the RET, as
+-       * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
++       * zen_return_thunk + 1 isn't an instruction boundary at the moment.
+        */
+       .byte   0xf6
+       /*
+-       * As executed from __x86_return_thunk, this is a plain RET.
++       * As executed from zen_return_thunk, this is a plain RET.
+        *
+        * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+        *
+@@ -156,13 +168,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL,
+        * With SMT enabled and STIBP active, a sibling thread cannot poison
+        * RET's prediction to a type of its choice, but can evict the
+        * prediction due to competitive sharing. If the prediction is
+-       * evicted, __x86_return_thunk will suffer Straight Line Speculation
++       * evicted, zen_return_thunk will suffer Straight Line Speculation
+        * which will be contained safely by the INT3.
+        */
+-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
++SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
+       ret
+       int3
+-SYM_CODE_END(__ret)
++SYM_CODE_END(zen_return_thunk)
+       /*
+        * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -173,7 +185,7 @@ SYM_CODE_END(__ret)
+        * Jump back and execute the RET in the middle of the TEST instruction.
+        * INT3 is for SLS protection.
+        */
+-      jmp __ret
++      jmp zen_return_thunk
+       int3
+ SYM_FUNC_END(zen_untrain_ret)
+ __EXPORT_THUNK(zen_untrain_ret)
+@@ -194,12 +206,19 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL
+       ANNOTATE_NOENDBR
+       .byte 0x48, 0xb8
++/*
++ * This forces the function return instruction to speculate into a trap
++ * (UD2 in srso_return_thunk() below).  This RET will then mispredict
++ * and execution will continue at the return site read from the top of
++ * the stack.
++ */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+       add $8, %_ASM_SP
+       ret
+       int3
+       int3
+       int3
++      /* end of movabs */
+       lfence
+       call srso_safe_ret
+       ud2
+@@ -207,12 +226,19 @@ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+-SYM_CODE_START(__x86_return_thunk)
++SYM_CODE_START(srso_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+-      ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+-                      "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
++      call srso_safe_ret
+       ud2
++SYM_CODE_END(srso_return_thunk)
++
++SYM_CODE_START(__x86_return_thunk)
++      UNWIND_HINT_FUNC
++      ANNOTATE_NOENDBR
++      ANNOTATE_UNRET_SAFE
++      ret
++      int3
+ SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -799,5 +799,5 @@ bool arch_is_rethunk(struct symbol *sym)
+       return !strcmp(sym->name, "__x86_return_thunk") ||
+              !strcmp(sym->name, "srso_untrain_ret") ||
+              !strcmp(sym->name, "srso_safe_ret") ||
+-             !strcmp(sym->name, "__ret");
++             !strcmp(sym->name, "zen_return_thunk");
+ }
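
For reference, a condensed user-space C sketch (not part of the queued patch) of the boot-time selection logic that the bugs.c hunks above introduce: retbleed's unret mitigation installs zen_return_thunk, and srso_select_mitigation(), running afterwards, overrides it with srso_return_thunk (Zen1/2) or srso_alias_return_thunk (Zen3/4, family 0x19). The thunk and feature names come from the patch; the enum, helper and main() are illustrative scaffolding only.

  /*
   * Illustrative model only -- not kernel code.  Mirrors the order in
   * cpu_select_mitigations(): retbleed picks a thunk first, then SRSO
   * may override the choice.
   */
  #include <stdbool.h>
  #include <stdio.h>

  enum ret_thunk { THUNK_DEFAULT, THUNK_ZEN, THUNK_SRSO, THUNK_SRSO_ALIAS };

  static const char *thunk_name[] = {
      "__x86_return_thunk (plain ret)",
      "zen_return_thunk (retbleed untraining)",
      "srso_return_thunk (Zen1/2 safe ret)",
      "srso_alias_return_thunk (Zen3/4 BTB aliasing)",
  };

  static enum ret_thunk select_return_thunk(bool rethunk_cfg, bool retbleed_unret,
                                            bool srso_safe_ret, bool fam_0x19)
  {
      enum ret_thunk thunk = THUNK_DEFAULT;

      if (retbleed_unret && rethunk_cfg)
          thunk = THUNK_ZEN;              /* x86_return_thunk = zen_return_thunk */

      if (srso_safe_ret)                  /* srso_select_mitigation() runs later */
          thunk = fam_0x19 ? THUNK_SRSO_ALIAS : THUNK_SRSO;

      return thunk;
  }

  int main(void)
  {
      printf("Zen3/4, SRSO safe RET: %s\n",
             thunk_name[select_return_thunk(true, false, true, true)]);
      printf("Zen1/2, SRSO safe RET: %s\n",
             thunk_name[select_return_thunk(true, true, true, false)]);
      printf("retbleed unret only:   %s\n",
             thunk_name[select_return_thunk(true, true, false, false)]);
      return 0;
  }
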
diff --git a/queue-6.1/x86-cpu-rename-original-retbleed-methods.patch b/queue-6.1/x86-cpu-rename-original-retbleed-methods.patch
new file mode 100644 (file)
index 0000000..283f61e
--- /dev/null
@@ -0,0 +1,182 @@
+From d025b7bac07a6e90b6b98b487f88854ad9247c39 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:32 +0200
+Subject: x86/cpu: Rename original retbleed methods
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit d025b7bac07a6e90b6b98b487f88854ad9247c39 upstream.
+
+Rename the original retbleed return thunk and untrain_ret to
+retbleed_return_thunk() and retbleed_untrain_ret().
+
+No functional changes.
+
+Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.909378169@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |    8 ++++----
+ arch/x86/kernel/cpu/bugs.c           |    2 +-
+ arch/x86/kernel/vmlinux.lds.S        |    2 +-
+ arch/x86/lib/retpoline.S             |   30 +++++++++++++++---------------
+ tools/objtool/arch/x86/decode.c      |    2 +-
+ tools/objtool/check.c                |    2 +-
+ 6 files changed, 23 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -168,7 +168,7 @@
+ .endm
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET  "call zen_untrain_ret"
++#define CALL_ZEN_UNTRAIN_RET  "call retbleed_untrain_ret"
+ #else
+ #define CALL_ZEN_UNTRAIN_RET  ""
+ #endif
+@@ -178,7 +178,7 @@
+  * return thunk isn't mapped into the userspace tables (then again, AMD
+  * typically has NO_MELTDOWN).
+  *
+- * While zen_untrain_ret() doesn't clobber anything but requires stack,
++ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
+  * entry_ibpb() will clobber AX, CX, DX.
+  *
+  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+@@ -216,11 +216,11 @@ extern void __x86_return_thunk(void);
+ static inline void __x86_return_thunk(void) {}
+ #endif
+-extern void zen_return_thunk(void);
++extern void retbleed_return_thunk(void);
+ extern void srso_return_thunk(void);
+ extern void srso_alias_return_thunk(void);
+-extern void zen_untrain_ret(void);
++extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_untrain_ret_alias(void);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1021,7 +1021,7 @@ do_cmd_auto:
+               setup_force_cpu_cap(X86_FEATURE_UNRET);
+               if (IS_ENABLED(CONFIG_RETHUNK))
+-                      x86_return_thunk = zen_return_thunk;
++                      x86_return_thunk = retbleed_return_thunk;
+               if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+                   boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -508,7 +508,7 @@ INIT_PER_CPU(irq_stack_backing_store);
+ #endif
+ #ifdef CONFIG_RETHUNK
+-. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
++. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -131,32 +131,32 @@ SYM_CODE_END(srso_alias_return_thunk)
+ /*
+  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+  *    alignment within the BTB.
+- * 2) The instruction at zen_untrain_ret must contain, and not
++ * 2) The instruction at retbleed_untrain_ret must contain, and not
+  *    end with, the 0xc3 byte of the RET.
+  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+  *    from re-poisioning the BTB prediction.
+  */
+       .align 64
+-      .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
+-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++      .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
++SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       ANNOTATE_NOENDBR
+       /*
+-       * As executed from zen_untrain_ret, this is:
++       * As executed from retbleed_untrain_ret, this is:
+        *
+        *   TEST $0xcc, %bl
+        *   LFENCE
+-       *   JMP zen_return_thunk
++       *   JMP retbleed_return_thunk
+        *
+        * Executing the TEST instruction has a side effect of evicting any BTB
+        * prediction (potentially attacker controlled) attached to the RET, as
+-       * zen_return_thunk + 1 isn't an instruction boundary at the moment.
++       * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
+        */
+       .byte   0xf6
+       /*
+-       * As executed from zen_return_thunk, this is a plain RET.
++       * As executed from retbleed_return_thunk, this is a plain RET.
+        *
+        * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+        *
+@@ -168,13 +168,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL,
+        * With SMT enabled and STIBP active, a sibling thread cannot poison
+        * RET's prediction to a type of its choice, but can evict the
+        * prediction due to competitive sharing. If the prediction is
+-       * evicted, zen_return_thunk will suffer Straight Line Speculation
++       * evicted, retbleed_return_thunk will suffer Straight Line Speculation
+        * which will be contained safely by the INT3.
+        */
+-SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
++SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
+       ret
+       int3
+-SYM_CODE_END(zen_return_thunk)
++SYM_CODE_END(retbleed_return_thunk)
+       /*
+        * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -185,13 +185,13 @@ SYM_CODE_END(zen_return_thunk)
+        * Jump back and execute the RET in the middle of the TEST instruction.
+        * INT3 is for SLS protection.
+        */
+-      jmp zen_return_thunk
++      jmp retbleed_return_thunk
+       int3
+-SYM_FUNC_END(zen_untrain_ret)
+-__EXPORT_THUNK(zen_untrain_ret)
++SYM_FUNC_END(retbleed_untrain_ret)
++__EXPORT_THUNK(retbleed_untrain_ret)
+ /*
+- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
++ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+  * above. On kernel entry, srso_untrain_ret() is executed which is a
+  *
+  * movabs $0xccccccc308c48348,%rax
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -799,5 +799,5 @@ bool arch_is_rethunk(struct symbol *sym)
+       return !strcmp(sym->name, "__x86_return_thunk") ||
+              !strcmp(sym->name, "srso_untrain_ret") ||
+              !strcmp(sym->name, "srso_safe_ret") ||
+-             !strcmp(sym->name, "zen_return_thunk");
++             !strcmp(sym->name, "retbleed_return_thunk");
+ }
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1430,7 +1430,7 @@ static int add_jump_destinations(struct
+                       struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
+                       /*
+-                       * This is a special case for zen_untrain_ret().
++                       * This is a special case for retbleed_untrain_ret().
+                        * It jumps to __x86_return_thunk(), but objtool
+                        * can't find the thunk's starting RET
+                        * instruction, because the RET is also in the
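
The safety of retbleed_untrain_ret() (and of the Zen1/2 SRSO variant) rests on one instruction overlapping another, as the comments in the retpoline.S hunks above spell out. Below is a tiny stand-alone C illustration of that byte overlap, not taken from the patch; the byte values are the ones named in the comments (0xf6 opcode, 0xc3 ModRM which is also RET, 0xcc imm8 which is also INT3).

  /*
   * Illustrative only.  Entered at offset 0 the bytes decode as
   * "test $0xcc, %bl"; entered at offset 1 (retbleed_return_thunk)
   * they are a plain "ret" followed by "int3".
   */
  #include <stdio.h>

  int main(void)
  {
      static const unsigned char seq[] = {
          0xf6,   /* TEST r/m8, imm8 opcode (group 3, /0)           */
          0xc3,   /* ModRM selecting %bl -- also the RET opcode     */
          0xcc,   /* imm8 operand        -- also INT3, contains SLS */
      };

      printf("offset 0: test $0x%02x, %%bl  (bytes %02x %02x %02x)\n",
             seq[2], seq[0], seq[1], seq[2]);
      printf("offset 1: ret (%02x); int3 (%02x)\n", seq[1], seq[2]);
      return 0;
  }
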
diff --git a/queue-6.1/x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch b/queue-6.1/x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch
new file mode 100644 (file)
index 0000000..a2964b7
--- /dev/null
@@ -0,0 +1,155 @@
+From 42be649dd1f2eee6b1fb185f1a231b9494cf095f Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:33 +0200
+Subject: x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 42be649dd1f2eee6b1fb185f1a231b9494cf095f upstream.
+
+For a more consistent namespace.
+
+  [ bp: Fixup names in the doc too. ]
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.976236447@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/srso.rst |    4 ++--
+ arch/x86/include/asm/nospec-branch.h       |    4 ++--
+ arch/x86/kernel/vmlinux.lds.S              |    8 ++++----
+ arch/x86/lib/retpoline.S                   |   26 +++++++++++++-------------
+ 4 files changed, 21 insertions(+), 21 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -124,8 +124,8 @@ sequence.
+ To ensure the safety of this mitigation, the kernel must ensure that the
+ safe return sequence is itself free from attacker interference.  In Zen3
+ and Zen4, this is accomplished by creating a BTB alias between the
+-untraining function srso_untrain_ret_alias() and the safe return
+-function srso_safe_ret_alias() which results in evicting a potentially
++untraining function srso_alias_untrain_ret() and the safe return
++function srso_alias_safe_ret() which results in evicting a potentially
+ poisoned BTB entry and using that safe one for all function returns.
+ In older Zen1 and Zen2, this is accomplished using a reinterpretation
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -195,7 +195,7 @@
+ #ifdef CONFIG_CPU_SRSO
+       ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+-                        "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
++                        "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ #endif
+ .endm
+@@ -222,7 +222,7 @@ extern void srso_alias_return_thunk(void
+ extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+-extern void srso_untrain_ret_alias(void);
++extern void srso_alias_untrain_ret(void);
+ extern void entry_ibpb(void);
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -141,10 +141,10 @@ SECTIONS
+ #ifdef CONFIG_CPU_SRSO
+               /*
+-               * See the comment above srso_untrain_ret_alias()'s
++               * See the comment above srso_alias_untrain_ret()'s
+                * definition.
+                */
+-              . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
++              . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+               *(.text.__x86.rethunk_safe)
+ #endif
+               ALIGN_ENTRY_TEXT_END
+@@ -523,8 +523,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+  * Instead do: (A | B) - (A & B) in order to compute the XOR
+  * of the two function addresses:
+  */
+-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+-              (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
++              (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+               "SRSO function pair won't alias");
+ #endif
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -76,56 +76,56 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+ #ifdef CONFIG_RETHUNK
+ /*
+- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
++ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+  * special addresses:
+  *
+- * - srso_untrain_ret_alias() is 2M aligned
+- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
++ * - srso_alias_untrain_ret() is 2M aligned
++ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+  * and 20 in its virtual address are set (while those bits in the
+- * srso_untrain_ret_alias() function are cleared).
++ * srso_alias_untrain_ret() function are cleared).
+  *
+  * This guarantees that those two addresses will alias in the branch
+  * target buffer of Zen3/4 generations, leading to any potential
+  * poisoned entries at that BTB slot to get evicted.
+  *
+- * As a result, srso_safe_ret_alias() becomes a safe return.
++ * As a result, srso_alias_safe_ret() becomes a safe return.
+  */
+ #ifdef CONFIG_CPU_SRSO
+       .section .text.__x86.rethunk_untrain
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+       ASM_NOP2
+       lfence
+       jmp srso_alias_return_thunk
+-SYM_FUNC_END(srso_untrain_ret_alias)
+-__EXPORT_THUNK(srso_untrain_ret_alias)
++SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+       .section .text.__x86.rethunk_safe
+ #else
+ /* dummy definition for alternatives */
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
+-SYM_FUNC_END(srso_untrain_ret_alias)
++SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       add $8, %_ASM_SP
+       UNWIND_HINT_FUNC
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
+-SYM_FUNC_END(srso_safe_ret_alias)
++SYM_FUNC_END(srso_alias_safe_ret)
+       .section .text.__x86.return_thunk
+ SYM_CODE_START(srso_alias_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+-      call srso_safe_ret_alias
++      call srso_alias_safe_ret
+       ud2
+ SYM_CODE_END(srso_alias_return_thunk)
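
Finally, a short stand-alone C check (not from the patch) of the address relation that the vmlinux.lds.S assertion in the hunk above enforces: the linker script cannot XOR two symbols directly, so it computes (A | B) - (A & B), which equals A ^ B, and requires that exactly bits 2, 8, 14 and 20 differ, which is what makes srso_alias_untrain_ret() and srso_alias_safe_ret() collide in the Zen3/4 BTB. The addresses below are made up; only the bit pattern matters.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      const uint64_t mask = (1u << 2) | (1u << 8) | (1u << 14) | (1u << 20);
      /* Hypothetical virtual addresses with the required bit relation. */
      const uint64_t untrain_ret = 0xffffffff82000040ull;   /* bits clear */
      const uint64_t safe_ret    = untrain_ret | mask;      /* bits set   */

      /* (A | B) - (A & B) == A ^ B for any A, B. */
      assert(((untrain_ret | safe_ret) - (untrain_ret & safe_ret)) ==
             (untrain_ret ^ safe_ret));
      assert((untrain_ret ^ safe_ret) == mask);

      printf("srso_alias pair differs in exactly bits 2, 8, 14, 20: OK\n");
      return 0;
  }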