git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Aug 2023 16:22:51 +0000 (18:22 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Aug 2023 16:22:51 +0000 (18:22 +0200)
added patches:
objtool-x86-fixup-frame-pointer-vs-rethunk.patch
x86-cpu-amd-fix-the-div-0-initial-fix-attempt.patch
x86-cpu-cleanup-the-untrain-mess.patch
x86-retpoline-don-t-clobber-rflags-during-srso_safe_ret.patch
x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch
x86-srso-correct-the-mitigation-status-when-smt-is-disabled.patch
x86-srso-disable-the-mitigation-on-unaffected-configurations.patch
x86-srso-explain-the-untraining-sequences-a-bit-more.patch
x86-static_call-fix-__static_call_fixup.patch

queue-6.1/objtool-x86-fixup-frame-pointer-vs-rethunk.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-cpu-amd-fix-the-div-0-initial-fix-attempt.patch [new file with mode: 0644]
queue-6.1/x86-cpu-cleanup-the-untrain-mess.patch [new file with mode: 0644]
queue-6.1/x86-retpoline-don-t-clobber-rflags-during-srso_safe_ret.patch [new file with mode: 0644]
queue-6.1/x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch [new file with mode: 0644]
queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-disabled.patch [new file with mode: 0644]
queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-configurations.patch [new file with mode: 0644]
queue-6.1/x86-srso-explain-the-untraining-sequences-a-bit-more.patch [new file with mode: 0644]
queue-6.1/x86-static_call-fix-__static_call_fixup.patch [new file with mode: 0644]

diff --git a/queue-6.1/objtool-x86-fixup-frame-pointer-vs-rethunk.patch b/queue-6.1/objtool-x86-fixup-frame-pointer-vs-rethunk.patch
new file mode 100644 (file)
index 0000000..9946662
--- /dev/null
@@ -0,0 +1,62 @@
+From dbf46008775516f7f25c95b7760041c286299783 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 16 Aug 2023 13:59:21 +0200
+Subject: objtool/x86: Fixup frame-pointer vs rethunk
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit dbf46008775516f7f25c95b7760041c286299783 upstream.
+
+For stack-validation of a frame-pointer build, objtool validates that
+every CALL instruction is preceded by a frame-setup. The new SRSO
+return thunks violate this with their RSB stuffing trickery.
+
+Extend the __fentry__ exception to also cover the embedded_insn case
+used for this. This cures:
+
+  vmlinux.o: warning: objtool: srso_untrain_ret+0xd: call without frame pointer save/setup
+
+Fixes: 4ae68b26c3ab ("objtool/x86: Fix SRSO mess")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Link: https://lore.kernel.org/r/20230816115921.GH980931@hirez.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/check.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2450,12 +2450,17 @@ static int decode_sections(struct objtoo
+       return 0;
+ }
+-static bool is_fentry_call(struct instruction *insn)
++static bool is_special_call(struct instruction *insn)
+ {
+-      if (insn->type == INSN_CALL &&
+-          insn->call_dest &&
+-          insn->call_dest->fentry)
+-              return true;
++      if (insn->type == INSN_CALL) {
++              struct symbol *dest = insn->call_dest;
++
++              if (!dest)
++                      return false;
++
+              if (dest->fentry || dest->embedded_insn)
++                      return true;
++      }
+       return false;
+ }
+@@ -3448,7 +3453,7 @@ static int validate_branch(struct objtoo
+                       if (ret)
+                               return ret;
+-                      if (opts.stackval && func && !is_fentry_call(insn) &&
++                      if (opts.stackval && func && !is_special_call(insn) &&
+                           !has_valid_stack_frame(&state)) {
+                               WARN_FUNC("call without frame pointer save/setup",
+                                         sec, insn->offset);
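
A toy model may make the rule concrete. The sketch below is not objtool code: the struct, validate_call() and main() are invented for illustration, and only the fentry/embedded_insn distinction mirrors the real check, so that a CALL issued before any frame setup warns unless it is one of the special cases.

#include <stdbool.h>
#include <stdio.h>

/* Toy model (not objtool itself) of the frame-pointer rule being relaxed:
 * a CALL must follow a frame setup unless it is "special", i.e. an
 * __fentry__ call or the embedded CALL of the SRSO untraining sequence,
 * which the earlier "objtool/x86: Fix SRSO mess" change marks via
 * embedded_insn. */
struct toy_insn {
        bool is_call;
        bool dest_fentry;
        bool dest_embedded_insn;
};

static bool is_special_call(const struct toy_insn *insn)
{
        return insn->is_call &&
               (insn->dest_fentry || insn->dest_embedded_insn);
}

static void validate_call(const struct toy_insn *insn, bool frame_set_up)
{
        if (insn->is_call && !is_special_call(insn) && !frame_set_up)
                puts("warning: call without frame pointer save/setup");
}

int main(void)
{
        struct toy_insn srso_call  = { .is_call = true, .dest_embedded_insn = true };
        struct toy_insn plain_call = { .is_call = true };

        validate_call(&srso_call, false);       /* silent: covered by the exception */
        validate_call(&plain_call, false);      /* still warns */
        return 0;
}
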
diff --git a/queue-6.1/series b/queue-6.1/series
index 3f000f2d22b8d65773db6ad9e25a3527f6e2b9c2..38380da39907904b94f40503f4a941ff3cd2dc90 100644 (file)
@@ -176,3 +176,12 @@ x86-alternative-make-custom-return-thunk-unconditional.patch
 x86-cpu-clean-up-srso-return-thunk-mess.patch
 x86-cpu-rename-original-retbleed-methods.patch
 x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch
+x86-cpu-cleanup-the-untrain-mess.patch
+x86-srso-explain-the-untraining-sequences-a-bit-more.patch
+x86-static_call-fix-__static_call_fixup.patch
+x86-retpoline-don-t-clobber-rflags-during-srso_safe_ret.patch
+x86-cpu-amd-fix-the-div-0-initial-fix-attempt.patch
+x86-srso-disable-the-mitigation-on-unaffected-configurations.patch
+x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch
+objtool-x86-fixup-frame-pointer-vs-rethunk.patch
+x86-srso-correct-the-mitigation-status-when-smt-is-disabled.patch
diff --git a/queue-6.1/x86-cpu-amd-fix-the-div-0-initial-fix-attempt.patch b/queue-6.1/x86-cpu-amd-fix-the-div-0-initial-fix-attempt.patch
new file mode 100644 (file)
index 0000000..75377e0
--- /dev/null
@@ -0,0 +1,73 @@
+From f58d6fbcb7c848b7f2469be339bc571f2e9d245b Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Fri, 11 Aug 2023 23:38:24 +0200
+Subject: x86/CPU/AMD: Fix the DIV(0) initial fix attempt
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream.
+
+Initially, it was thought that doing an innocuous division in the #DE
+handler would be enough to prevent any leaking of old data from the
+divider, but by the time the fault is raised, the speculation has already
+advanced too far and such data could already have been used by younger
+operations.
+
+Therefore, do the innocuous division on every exit to userspace so that
+userspace doesn't see any potentially old data from integer divisions in
+kernel space.
+
+Do the same before VMRUN, to protect host data from leaking into the
+guest as well.
+
+Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0")
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/entry-common.h |    1 +
+ arch/x86/kernel/cpu/amd.c           |    1 +
+ arch/x86/kernel/traps.c             |    2 --
+ arch/x86/kvm/svm/svm.c              |    2 ++
+ 4 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mod
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+       mds_user_clear_cpu_buffers();
++      amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1295,3 +1295,4 @@ void noinstr amd_clear_divider(void)
+       asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+                    :: "a" (0), "d" (0), "r" (1));
+ }
++EXPORT_SYMBOL_GPL(amd_clear_divider);
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+       do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+                     FPE_INTDIV, error_get_trap_addr(regs));
+-
+-      amd_clear_divider();
+ }
+ DEFINE_IDTENTRY(exc_overflow)
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3947,6 +3947,8 @@ static noinstr void svm_vcpu_enter_exit(
+       guest_state_enter_irqoff();
++      amd_clear_divider();
++
+       if (sev_es_guest(vcpu->kvm))
+               __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+       else
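
For reference, the scrubbing operation the new call sites invoke is tiny. The user-space sketch below only approximates amd_clear_divider(); it drops the ALTERNATIVE/X86_BUG_DIV0 patching and assumes x86-64 with GCC or Clang inline asm. The idea is a 0/1 division whose only purpose is to overwrite whatever the divide unit still holds from a previous, possibly secret-dependent division.

#include <stdint.h>

/* Approximation of amd_clear_divider() for illustration only; the real
 * helper wraps the DIV in an ALTERNATIVE gated on X86_BUG_DIV0. DIV r/m32
 * divides EDX:EAX by the operand, so dividing 0 by 1 leaves only zeros in
 * the divider's internal state. */
static inline void scrub_divider_state(void)
{
        uint32_t lo = 0, hi = 0;
        uint32_t one = 1;

        asm volatile("div %2"
                     : "+a" (lo), "+d" (hi)
                     : "r" (one));
}

int main(void)
{
        scrub_divider_state();  /* e.g. on the exit-to-userspace path */
        return 0;
}
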
diff --git a/queue-6.1/x86-cpu-cleanup-the-untrain-mess.patch b/queue-6.1/x86-cpu-cleanup-the-untrain-mess.patch
new file mode 100644 (file)
index 0000000..d02f293
--- /dev/null
@@ -0,0 +1,91 @@
+From e7c25c441e9e0fa75b4c83e0b26306b702cfe90d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:34 +0200
+Subject: x86/cpu: Cleanup the untrain mess
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit e7c25c441e9e0fa75b4c83e0b26306b702cfe90d upstream.
+
+Since there can only be one active return_thunk, there only needs to be
+one (matching) untrain_ret. It fundamentally doesn't make sense to
+allow multiple untrain_ret at the same time.
+
+Fold all the 3 different untrain methods into a single (temporary)
+helper stub.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |   12 ++++--------
+ arch/x86/kernel/cpu/bugs.c           |    1 +
+ arch/x86/lib/retpoline.S             |    7 +++++++
+ 3 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -168,9 +168,9 @@
+ .endm
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET  "call retbleed_untrain_ret"
++#define CALL_UNTRAIN_RET      "call entry_untrain_ret"
+ #else
+-#define CALL_ZEN_UNTRAIN_RET  ""
++#define CALL_UNTRAIN_RET      ""
+ #endif
+ /*
+@@ -189,14 +189,9 @@
+       defined(CONFIG_CPU_SRSO)
+       ANNOTATE_UNRET_END
+       ALTERNATIVE_2 "",                                               \
+-                    CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET,          \
++                    CALL_UNTRAIN_RET, X86_FEATURE_UNRET,              \
+                     "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
+-
+-#ifdef CONFIG_CPU_SRSO
+-      ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+-                        "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+-#endif
+ .endm
+ #else /* __ASSEMBLY__ */
+@@ -224,6 +219,7 @@ extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_alias_untrain_ret(void);
++extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+ #ifdef CONFIG_RETPOLINE
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2429,6 +2429,7 @@ static void __init srso_select_mitigatio
+                        * like ftrace, static_call, etc.
+                        */
+                       setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++                      setup_force_cpu_cap(X86_FEATURE_UNRET);
+                       if (boot_cpu_data.x86 == 0x19) {
+                               setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -233,6 +233,13 @@ SYM_CODE_START(srso_return_thunk)
+       ud2
+ SYM_CODE_END(srso_return_thunk)
++SYM_FUNC_START(entry_untrain_ret)
++      ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
++                    "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
++                    "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++SYM_FUNC_END(entry_untrain_ret)
++__EXPORT_THUNK(entry_untrain_ret)
++
+ SYM_CODE_START(__x86_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
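
As a loose analogy for the new entry_untrain_ret stub (the example below is plain C with invented names, whereas the kernel patches the jump target in place with ALTERNATIVE_2 at boot), the point is that exactly one untraining routine is ever live, selected once from the CPU feature bits:

#include <stdio.h>

typedef void (*untrain_fn)(void);

static void retbleed_untrain(void)   { puts("Retbleed untraining sequence"); }
static void srso_untrain(void)       { puts("SRSO Zen1/2 untraining sequence"); }
static void srso_alias_untrain(void) { puts("SRSO Zen3/4 untraining sequence"); }

/* Default mirrors the "jmp retbleed_untrain_ret" default of the ALTERNATIVE_2. */
static untrain_fn entry_untrain = retbleed_untrain;

int main(void)
{
        /* Stand-ins for the X86_FEATURE_SRSO / X86_FEATURE_SRSO_ALIAS bits. */
        int feature_srso = 1, feature_srso_alias = 0;

        if (feature_srso_alias)
                entry_untrain = srso_alias_untrain;
        else if (feature_srso)
                entry_untrain = srso_untrain;

        entry_untrain();        /* the single stub UNTRAIN_RET now calls */
        return 0;
}
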
diff --git a/queue-6.1/x86-retpoline-don-t-clobber-rflags-during-srso_safe_ret.patch b/queue-6.1/x86-retpoline-don-t-clobber-rflags-during-srso_safe_ret.patch
new file mode 100644 (file)
index 0000000..d541862
--- /dev/null
@@ -0,0 +1,111 @@
+From ba5ca5e5e6a1d55923e88b4a83da452166f5560e Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 11 Aug 2023 08:52:55 -0700
+Subject: x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ba5ca5e5e6a1d55923e88b4a83da452166f5560e upstream.
+
+Use LEA instead of ADD when adjusting %rsp in srso_safe_ret{,_alias}()
+so as to avoid clobbering flags.  Drop one of the INT3 instructions to
+account for the LEA consuming one more byte than the ADD.
+
+KVM's emulator makes indirect calls into a jump table of sorts, where
+the destination of each call is a small blob of code that performs fast
+emulation by executing the target instruction with fixed operands.
+
+E.g. to emulate ADC, fastop() invokes adcb_al_dl():
+
+  adcb_al_dl:
+    <+0>:  adc    %dl,%al
+    <+2>:  jmp    <__x86_return_thunk>
+
+A major motivation for doing fast emulation is to leverage the CPU to
+handle consumption and manipulation of arithmetic flags, i.e. RFLAGS is
+both an input and output to the target of the call.  fastop() collects
+the RFLAGS result by pushing RFLAGS onto the stack and popping them back
+into a variable (held in %rdi in this case):
+
+  asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
+
+  <+71>: mov    0xc0(%r8),%rdx
+  <+78>: mov    0x100(%r8),%rcx
+  <+85>: push   %rdi
+  <+86>: popf
+  <+87>: call   *%rsi
+  <+89>: nop
+  <+90>: nop
+  <+91>: nop
+  <+92>: pushf
+  <+93>: pop    %rdi
+
+and then propagating the arithmetic flags into the vCPU's emulator state:
+
+  ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
+
+  <+64>:  and    $0xfffffffffffff72a,%r9
+  <+94>:  and    $0x8d5,%edi
+  <+109>: or     %rdi,%r9
+  <+122>: mov    %r9,0x10(%r8)
+
+The failures can be most easily reproduced by running the "emulator"
+test in KVM-Unit-Tests.
+
+If you're feeling a bit of deja vu, see commit b63f20a778c8
+("x86/retpoline: Don't clobber RFLAGS during CALL_NOSPEC on i386").
+
+In addition, this breaks booting of a clang-compiled guest on
+a gcc-compiled host where the host kernel contains the %rsp-modifying
+SRSO mitigations.
+
+  [ bp: Massage commit message, extend, remove addresses. ]
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Closes: https://lore.kernel.org/all/de474347-122d-54cd-eabf-9dcc95ab9eae@amd.com
+Reported-by: Srikanth Aithal <sraithal@amd.com>
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/20230810013334.GA5354@dev-arch.thelio-3990X/
+Link: https://lore.kernel.org/r/20230811155255.250835-1-seanjc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/retpoline.S |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -113,7 +113,7 @@ SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+-      add $8, %_ASM_SP
++      lea 8(%_ASM_SP), %_ASM_SP
+       UNWIND_HINT_FUNC
+       ANNOTATE_UNRET_SAFE
+       ret
+@@ -213,7 +213,7 @@ __EXPORT_THUNK(retbleed_untrain_ret)
+  * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+  * above. On kernel entry, srso_untrain_ret() is executed which is a
+  *
+- * movabs $0xccccccc308c48348,%rax
++ * movabs $0xccccc30824648d48,%rax
+  *
+  * and when the return thunk executes the inner label srso_safe_ret()
+  * later, it is a stack manipulation and a RET which is mispredicted and
+@@ -232,11 +232,10 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL
+  * the stack.
+  */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+-      add $8, %_ASM_SP
++      lea 8(%_ASM_SP), %_ASM_SP
+       ret
+       int3
+       int3
+-      int3
+       /* end of movabs */
+       lfence
+       call srso_safe_ret
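
The flags behaviour the fix relies on is easy to demonstrate in isolation. The program below is a user-space sketch, assuming x86-64 with GCC or Clang inline asm and nothing KVM-specific: it sets the carry flag, bumps a register by 8 with either ADD or LEA, and reads RFLAGS back. ADD recomputes the arithmetic flags; LEA is pure address arithmetic and leaves them alone, which is why srso_safe_ret() can adjust %rsp with LEA without corrupting the flags fastop() expects to survive the call.

#include <stdint.h>
#include <stdio.h>

/* Set CF, adjust a register by 8 with ADD or LEA, then read RFLAGS back.
 * ADD rewrites the arithmetic flags (0 + 8 does not carry, so CF ends up 0);
 * LEA leaves RFLAGS untouched, so the CF set by STC survives. */
static uint64_t rflags_after(int use_lea)
{
        uint64_t flags, reg = 0;

        if (use_lea)
                asm volatile("stc\n\t"
                             "lea 8(%[r]), %[r]\n\t"
                             "pushfq\n\t"
                             "pop %[f]"
                             : [f] "=r" (flags), [r] "+r" (reg)
                             : : "cc");
        else
                asm volatile("stc\n\t"
                             "add $8, %[r]\n\t"
                             "pushfq\n\t"
                             "pop %[f]"
                             : [f] "=r" (flags), [r] "+r" (reg)
                             : : "cc");
        return flags;
}

int main(void)
{
        printf("CF after add: %lu\n", (unsigned long)(rflags_after(0) & 1));
        printf("CF after lea: %lu\n", (unsigned long)(rflags_after(1) & 1));
        return 0;
}
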
diff --git a/queue-6.1/x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch b/queue-6.1/x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch
new file mode 100644 (file)
index 0000000..462ea72
--- /dev/null
@@ -0,0 +1,132 @@
+From 79cd2a11224eab86d6673fe8a11d2046ae9d2757 Mon Sep 17 00:00:00 2001
+From: Petr Pavlu <petr.pavlu@suse.com>
+Date: Tue, 11 Jul 2023 11:19:51 +0200
+Subject: x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+commit 79cd2a11224eab86d6673fe8a11d2046ae9d2757 upstream.
+
+The linker script arch/x86/kernel/vmlinux.lds.S matches the thunk
+sections ".text.__x86.*" from arch/x86/lib/retpoline.S as follows:
+
+  .text {
+    [...]
+    TEXT_TEXT
+    [...]
+    __indirect_thunk_start = .;
+    *(.text.__x86.*)
+    __indirect_thunk_end = .;
+    [...]
+  }
+
+Macro TEXT_TEXT references TEXT_MAIN which normally expands to only
+".text". However, with CONFIG_LTO_CLANG, TEXT_MAIN becomes
+".text .text.[0-9a-zA-Z_]*" which wrongly matches also the thunk
+sections. The output layout is then different than expected. For
+instance, the currently defined range [__indirect_thunk_start,
+__indirect_thunk_end] becomes empty.
+
+Prevent the problem by using ".." as the first separator, for example,
+".text..__x86.indirect_thunk". This pattern is utilized by other
+explicit section names which start with one of the standard prefixes,
+such as ".text" or ".data", and that need to be individually selected in
+the linker script.
+
+  [ nathan: Fix conflicts with SRSO and fold in fix issue brought up by
+    Andrew Cooper in post-review:
+    https://lore.kernel.org/20230803230323.1478869-1-andrew.cooper3@citrix.com ]
+
+Fixes: dc5723b02e52 ("kbuild: add support for Clang LTO")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230711091952.27944-2-petr.pavlu@suse.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/vmlinux.lds.S |    8 ++++----
+ arch/x86/lib/retpoline.S      |    8 ++++----
+ tools/objtool/check.c         |    2 +-
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -134,7 +134,7 @@ SECTIONS
+               KPROBES_TEXT
+               ALIGN_ENTRY_TEXT_BEGIN
+ #ifdef CONFIG_CPU_SRSO
+-              *(.text.__x86.rethunk_untrain)
++              *(.text..__x86.rethunk_untrain)
+ #endif
+               ENTRY_TEXT
+@@ -145,7 +145,7 @@ SECTIONS
+                * definition.
+                */
+               . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+-              *(.text.__x86.rethunk_safe)
++              *(.text..__x86.rethunk_safe)
+ #endif
+               ALIGN_ENTRY_TEXT_END
+               SOFTIRQENTRY_TEXT
+@@ -154,8 +154,8 @@ SECTIONS
+ #ifdef CONFIG_RETPOLINE
+               __indirect_thunk_start = .;
+-              *(.text.__x86.indirect_thunk)
+-              *(.text.__x86.return_thunk)
++              *(.text..__x86.indirect_thunk)
++              *(.text..__x86.return_thunk)
+               __indirect_thunk_end = .;
+ #endif
+       } :text =0xcccc
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -11,7 +11,7 @@
+ #include <asm/frame.h>
+ #include <asm/nops.h>
+-      .section .text.__x86.indirect_thunk
++      .section .text..__x86.indirect_thunk
+ .macro RETPOLINE reg
+       ANNOTATE_INTRA_FUNCTION_CALL
+@@ -91,7 +91,7 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+  * As a result, srso_alias_safe_ret() becomes a safe return.
+  */
+ #ifdef CONFIG_CPU_SRSO
+-      .section .text.__x86.rethunk_untrain
++      .section .text..__x86.rethunk_untrain
+ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       UNWIND_HINT_FUNC
+@@ -102,7 +102,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_
+ SYM_FUNC_END(srso_alias_untrain_ret)
+ __EXPORT_THUNK(srso_alias_untrain_ret)
+-      .section .text.__x86.rethunk_safe
++      .section .text..__x86.rethunk_safe
+ #else
+ /* dummy definition for alternatives */
+ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+@@ -120,7 +120,7 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLO
+       int3
+ SYM_FUNC_END(srso_alias_safe_ret)
+-      .section .text.__x86.return_thunk
++      .section .text..__x86.return_thunk
+ SYM_CODE_START(srso_alias_return_thunk)
+       UNWIND_HINT_FUNC
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -379,7 +379,7 @@ static int decode_instructions(struct ob
+               if (!strcmp(sec->name, ".noinstr.text") ||
+                   !strcmp(sec->name, ".entry.text") ||
+-                  !strncmp(sec->name, ".text.__x86.", 12))
++                  !strncmp(sec->name, ".text..__x86.", 13))
+                       sec->noinstr = true;
+               for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
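
The section-matching problem can be reproduced outside the linker. In the snippet below, fnmatch() merely stands in for ld's wildcard matching (an assumption, not the linker's actual matcher), and the glob approximates what TEXT_MAIN expands to under CONFIG_LTO_CLANG; it shows why the old thunk section names were swallowed into .text by TEXT_TEXT while the renamed ones, with the extra dot, are left for the explicit __indirect_thunk_start/__indirect_thunk_end range.

#include <fnmatch.h>
#include <stdio.h>

/* Under CONFIG_LTO_CLANG, TEXT_MAIN expands to roughly
 * ".text .text.[0-9a-zA-Z_]*". fnmatch() only approximates the linker's
 * glob rules, but the bracket expression behaves the same for these names. */
int main(void)
{
        const char *glob = ".text.[0-9a-zA-Z_]*";
        const char *names[] = {
                ".text.__x86.indirect_thunk",   /* old name: '_' is in the bracket set */
                ".text..__x86.indirect_thunk",  /* new name: '.' is not, so no match   */
        };

        for (int i = 0; i < 2; i++)
                printf("%-30s -> %s\n", names[i],
                       fnmatch(glob, names[i], 0) == 0 ?
                       "matched by TEXT_MAIN" : "left for the thunk output range");
        return 0;
}
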
diff --git a/queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-disabled.patch b/queue-6.1/x86-srso-correct-the-mitigation-status-when-smt-is-disabled.patch
new file mode 100644 (file)
index 0000000..8d338f7
--- /dev/null
@@ -0,0 +1,43 @@
+From 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Tue, 15 Aug 2023 11:53:13 +0200
+Subject: x86/srso: Correct the mitigation status when SMT is disabled
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 upstream.
+
+Specify how SRSO is mitigated when SMT is disabled. Also, correct the
+SMT check for that.
+
+Fixes: e9fbc47b818b ("x86/srso: Disable the mitigation on unaffected configurations")
+Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Link: https://lore.kernel.org/r/20230814200813.p5czl47zssuej7nv@treble
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2398,8 +2398,7 @@ static void __init srso_select_mitigatio
+                * Zen1/2 with SMT off aren't vulnerable after the right
+                * IBPB microcode has been applied.
+                */
+-              if ((boot_cpu_data.x86 < 0x19) &&
+-                  (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
++              if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+                       setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+                       return;
+               }
+@@ -2689,7 +2688,7 @@ static ssize_t gds_show_state(char *buf)
+ static ssize_t srso_show_state(char *buf)
+ {
+       if (boot_cpu_has(X86_FEATURE_SRSO_NO))
+-              return sysfs_emit(buf, "Not affected\n");
++              return sysfs_emit(buf, "Mitigation: SMT disabled\n");
+       return sysfs_emit(buf, "%s%s\n",
+                         srso_strings[srso_mitigation],
diff --git a/queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-configurations.patch b/queue-6.1/x86-srso-disable-the-mitigation-on-unaffected-configurations.patch
new file mode 100644 (file)
index 0000000..b82bfd1
--- /dev/null
@@ -0,0 +1,45 @@
+From e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Sun, 13 Aug 2023 12:39:34 +0200
+Subject: x86/srso: Disable the mitigation on unaffected configurations
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 upstream.
+
+Skip the SRSO cmdline parsing, which is not needed on Zen1/2 with SMT
+disabled and with the proper microcode applied (the latter should be the
+case anyway), as those configurations are not affected.
+
+Fixes: 5a15d8348881 ("x86/srso: Tie SBPB bit setting to microcode patch detection")
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230813104517.3346-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/bugs.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2399,8 +2399,10 @@ static void __init srso_select_mitigatio
+                * IBPB microcode has been applied.
+                */
+               if ((boot_cpu_data.x86 < 0x19) &&
+-                  (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
++                  (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
+                       setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
++                      return;
++              }
+       }
+       if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+@@ -2686,6 +2688,9 @@ static ssize_t gds_show_state(char *buf)
+ static ssize_t srso_show_state(char *buf)
+ {
++      if (boot_cpu_has(X86_FEATURE_SRSO_NO))
++              return sysfs_emit(buf, "Not affected\n");
++
+       return sysfs_emit(buf, "%s%s\n",
+                         srso_strings[srso_mitigation],
+                         (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
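
Taken together, this patch and the previous one in the batch ("Correct the mitigation status when SMT is disabled") leave the logic roughly as sketched below. The names echo the kernel ones, but the scaffolding is simplified for illustration: plain booleans replace CPU feature bits and puts() replaces sysfs_emit().

#include <stdbool.h>
#include <stdio.h>

/* Condensed sketch (not the kernel code) of where the two SRSO patches land:
 * Zen1/2 with SMT not possible is marked SRSO_NO and skips the rest of the
 * mitigation selection, and sysfs then reports that state as a mitigation
 * rather than "Not affected". */
static bool srso_no;

static void srso_select_mitigation(unsigned int family, bool smt_possible)
{
        if (family < 0x19 && !smt_possible) {
                srso_no = true;         /* setup_force_cpu_cap(X86_FEATURE_SRSO_NO) */
                return;                 /* skip cmdline parsing and mitigation setup */
        }
        /* ... otherwise parse spec_rstack_overflow= and pick a mitigation ... */
}

static void srso_show_state(void)
{
        if (srso_no)
                puts("Mitigation: SMT disabled");
        else
                puts("Mitigation: Safe RET");   /* one of several possible strings */
}

int main(void)
{
        srso_select_mitigation(0x17, false);    /* e.g. Zen 2 with SMT not possible */
        srso_show_state();
        return 0;
}
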
diff --git a/queue-6.1/x86-srso-explain-the-untraining-sequences-a-bit-more.patch b/queue-6.1/x86-srso-explain-the-untraining-sequences-a-bit-more.patch
new file mode 100644 (file)
index 0000000..c29b78d
--- /dev/null
@@ -0,0 +1,46 @@
+From 9dbd23e42ff0b10c9b02c9e649c76e5228241a8e Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Mon, 14 Aug 2023 21:29:50 +0200
+Subject: x86/srso: Explain the untraining sequences a bit more
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 9dbd23e42ff0b10c9b02c9e649c76e5228241a8e upstream.
+
+The goal is to eventually have a proper documentation about all this.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814164447.GFZNpZ/64H4lENIe94@fat_crate.local
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/retpoline.S |   19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -130,6 +130,25 @@ SYM_CODE_START(srso_alias_return_thunk)
+ SYM_CODE_END(srso_alias_return_thunk)
+ /*
++ * Some generic notes on the untraining sequences:
++ *
++ * They are interchangeable when it comes to flushing potentially wrong
++ * RET predictions from the BTB.
++ *
++ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
++ * Retbleed sequence because the return sequence done there
++ * (srso_safe_ret()) is longer and the return sequence must fully nest
++ * (end before) the untraining sequence. Therefore, the untraining
++ * sequence must fully overlap the return sequence.
++ *
++ * Regarding alignment - the instructions which need to be untrained,
++ * must all start at a cacheline boundary for Zen1/2 generations. That
++ * is, instruction sequences starting at srso_safe_ret() and
++ * the respective instruction sequences at retbleed_return_thunk()
++ * must start at a cacheline boundary.
++ */
++
++/*
+  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+  * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+  *    alignment within the BTB.
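
The nesting constraint described above can also be read straight off the bytes. The sketch below simply prints one plausible encoding, derived from the 0xccccc30824648d48 constant quoted in the RFLAGS patch earlier in this batch; treat the disassembly comments as a reading of those bytes rather than an authoritative dump. srso_untrain_ret() starts with the two MOVABS opcode bytes, and the 8-byte immediate that follows is the entire body of srso_safe_ret(), so the return sequence ends exactly where the untraining instruction ends.

#include <stdio.h>

/* One reading of the SRSO Zen1/2 overlap: executed from srso_untrain_ret(),
 * the CPU sees a single 10-byte MOVABS; entered at srso_safe_ret() (offset 2),
 * it sees LEA/RET/INT3/INT3, i.e. the return sequence nests completely inside
 * the untraining instruction. The bytes below spell the 0xccccc30824648d48
 * immediate from the retpoline.S comment, little-endian in memory. */
int main(void)
{
        static const unsigned char seq[] = {
                0x48, 0xb8,                     /* movabs $imm64, %rax    <- srso_untrain_ret */
                0x48, 0x8d, 0x64, 0x24, 0x08,   /* lea    0x8(%rsp), %rsp <- srso_safe_ret    */
                0xc3,                           /* ret                                        */
                0xcc, 0xcc,                     /* int3, int3                                 */
        };

        for (unsigned int i = 0; i < sizeof(seq); i++)
                printf("%02x%c", seq[i], i + 1 < sizeof(seq) ? ' ' : '\n');
        return 0;
}
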
diff --git a/queue-6.1/x86-static_call-fix-__static_call_fixup.patch b/queue-6.1/x86-static_call-fix-__static_call_fixup.patch
new file mode 100644 (file)
index 0000000..a760430
--- /dev/null
@@ -0,0 +1,51 @@
+From 54097309620ef0dc2d7083783dc521c6a5fef957 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 16 Aug 2023 12:44:19 +0200
+Subject: x86/static_call: Fix __static_call_fixup()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 54097309620ef0dc2d7083783dc521c6a5fef957 upstream.
+
+Christian reported spurious module load crashes after some of Song's
+module memory layout patches.
+
+Turns out that if the very last instruction on the very last page of the
+module is a 'JMP __x86_return_thunk' then __static_call_fixup() will
+trip a fault and die.
+
+And while the module rework made this slightly more likely to happen,
+it's always been possible.
+
+Fixes: ee88d363d156 ("x86,static_call: Use alternative RET encoding")
+Reported-by: Christian Bricart <christian@bricart.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Link: https://lkml.kernel.org/r/20230816104419.GA982867@hirez.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/static_call.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -184,6 +184,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_trans
+  */
+ bool __static_call_fixup(void *tramp, u8 op, void *dest)
+ {
++      unsigned long addr = (unsigned long)tramp;
++      /*
++       * Not all .return_sites are a static_call trampoline (most are not).
++       * Check if the 3 bytes after the return are still kernel text, if not,
++       * then this definitely is not a trampoline and we need not worry
++       * further.
++       *
++       * This avoids the memcmp() below tripping over pagefaults etc..
++       */
++      if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
++          !kernel_text_address(addr + 7))
++              return false;
++
+       if (memcmp(tramp+5, tramp_ud, 3)) {
+               /* Not a trampoline site, not our problem. */
+               return false;