--- /dev/null
+From 88b19a0e5cbc59720e4c808689ea87c3fa140acf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Sep 2021 23:41:13 +0900
+Subject: objtool: Add frame-pointer-specific function ignore
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+[ Upstream commit e028c4f7ac7ca8c96126fe46c54ab3d56ffe6a66 ]
+
+Add a CONFIG_FRAME_POINTER-specific version of
+STACK_FRAME_NON_STANDARD() for the case where a function is
+intentionally missing frame pointer setup, but otherwise needs
+objtool/ORC coverage when frame pointers are disabled.
+
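+A minimal usage sketch, with a hypothetical function name -- the
+macro is used exactly like STACK_FRAME_NON_STANDARD():
+
+	/* defined in asm; frame pointer setup intentionally omitted */
+	void my_asm_thunk(void);
+	/* skip FP validation, but keep objtool/ORC coverage otherwise */
+	STACK_FRAME_NON_STANDARD_FP(my_asm_thunk);
+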
+Link: https://lkml.kernel.org/r/163163047364.489837.17377799909553689661.stgit@devnote2
+
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Tested-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Stable-dep-of: c8c301abeae5 ("x86/ibt: Add ANNOTATE_NOENDBR")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/objtool.h | 12 ++++++++++++
+ tools/include/linux/objtool.h | 12 ++++++++++++
+ 2 files changed, 24 insertions(+)
+
+--- a/include/linux/objtool.h
++++ b/include/linux/objtool.h
+@@ -71,6 +71,17 @@ struct unwind_hint {
+ static void __used __section(".discard.func_stack_frame_non_standard") \
+ *__func_stack_frame_non_standard_##func = func
+
++/*
++ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
++ * for the case where a function is intentionally missing frame pointer setup,
++ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
++ */
++#ifdef CONFIG_FRAME_POINTER
++#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
++#else
++#define STACK_FRAME_NON_STANDARD_FP(func)
++#endif
++
+ #else /* __ASSEMBLY__ */
+
+ /*
+@@ -126,6 +137,7 @@ struct unwind_hint {
+ #define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+ "\n\t"
+ #define STACK_FRAME_NON_STANDARD(func)
++#define STACK_FRAME_NON_STANDARD_FP(func)
+ #else
+ #define ANNOTATE_INTRA_FUNCTION_CALL
+ .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+--- a/tools/include/linux/objtool.h
++++ b/tools/include/linux/objtool.h
+@@ -71,6 +71,17 @@ struct unwind_hint {
+ static void __used __section(".discard.func_stack_frame_non_standard") \
+ *__func_stack_frame_non_standard_##func = func
+
++/*
++ * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore
++ * for the case where a function is intentionally missing frame pointer setup,
++ * but otherwise needs objtool/ORC coverage when frame pointers are disabled.
++ */
++#ifdef CONFIG_FRAME_POINTER
++#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func)
++#else
++#define STACK_FRAME_NON_STANDARD_FP(func)
++#endif
++
+ #else /* __ASSEMBLY__ */
+
+ /*
+@@ -126,6 +137,7 @@ struct unwind_hint {
+ #define UNWIND_HINT(sp_reg, sp_offset, type, end) \
+ "\n\t"
+ #define STACK_FRAME_NON_STANDARD(func)
++#define STACK_FRAME_NON_STANDARD_FP(func)
+ #else
+ #define ANNOTATE_INTRA_FUNCTION_CALL
+ .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
--- /dev/null
+From dbf46008775516f7f25c95b7760041c286299783 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 16 Aug 2023 13:59:21 +0200
+Subject: objtool/x86: Fixup frame-pointer vs rethunk
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit dbf46008775516f7f25c95b7760041c286299783 upstream.
+
+For stack-validation of a frame-pointer build, objtool validates that
+every CALL instruction is preceded by a frame-setup. The new SRSO
+return thunks violate this with their RSB stuffing trickery.
+
+Extend the __fentry__ exception to also cover the embedded_insn case
+used for this. This cures:
+
+ vmlinux.o: warning: objtool: srso_untrain_ret+0xd: call without frame pointer save/setup
+
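+Conceptually, the extended exception amounts to (a sketch of the
+logic, not the literal hunk below):
+
+	/* calls which legitimately lack a preceding frame setup */
+	if (dest->fentry || dest->embedded_insn)
+		return true;
+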
+Fixes: 4ae68b26c3ab ("objtool/x86: Fix SRSO mess")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Link: https://lore.kernel.org/r/20230816115921.GH980931@hirez.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/check.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -2079,12 +2079,17 @@ static int decode_sections(struct objtoo
+ return 0;
+ }
+
+-static bool is_fentry_call(struct instruction *insn)
++static bool is_special_call(struct instruction *insn)
+ {
+- if (insn->type == INSN_CALL &&
+- insn->call_dest &&
+- insn->call_dest->fentry)
+- return true;
++ if (insn->type == INSN_CALL) {
++ struct symbol *dest = insn->call_dest;
++
++ if (!dest)
++ return false;
++
++		if (dest->fentry || dest->embedded_insn)
++ return true;
++ }
+
+ return false;
+ }
+@@ -2958,7 +2963,7 @@ static int validate_branch(struct objtoo
+ if (ret)
+ return ret;
+
+- if (!no_fp && func && !is_fentry_call(insn) &&
++ if (!no_fp && func && !is_special_call(insn) &&
+ !has_valid_stack_frame(&state)) {
+ WARN_FUNC("call without frame pointer save/setup",
+ sec, insn->offset);
arm64-dts-rockchip-disable-hs400-for-emmc-on-rock-pi.patch
asoc-rt5665-add-missed-regulator_bulk_disable.patch
asoc-meson-axg-tdm-formatter-fix-channel-slot-alloca.patch
-x86-srso-disable-the-mitigation-on-unaffected-config.patch
-x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
alsa-hda-realtek-remodified-3k-pull-low-procedure.patch
riscv-__asm_copy_to-from_user-optimize-unaligned-mem.patch
riscv-lib-uaccess-fold-fixups-into-body.patch
riscv-lib-uaccess-fix-csr_status-sr_sum-bit.patch
riscv-uaccess-return-the-number-of-bytes-effectively.patch
-x86-static_call-fix-__static_call_fixup.patch
-x86-srso-correct-the-mitigation-status-when-smt-is-d.patch
serial-8250-fix-oops-for-port-pm-on-uart_change_pm.patch
alsa-usb-audio-add-support-for-mythware-xa001au-capture-and-playback-interfaces.patch
cifs-release-folio-lock-on-fscache-read-hit.patch
virtio-net-set-queues-after-driver_ok.patch
net-fix-the-rto-timer-retransmitting-skb-every-1ms-if-linear-option-is-enabled.patch
mmc-f-sdh30-fix-order-of-function-calls-in-sdhci_f_sdh30_remove.patch
+x86-cpu-fix-__x86_return_thunk-symbol-type.patch
+x86-cpu-fix-up-srso_safe_ret-and-__x86_return_thunk.patch
+x86-alternative-make-custom-return-thunk-unconditional.patch
+objtool-add-frame-pointer-specific-function-ignore.patch
+x86-ibt-add-annotate_noendbr.patch
+x86-cpu-clean-up-srso-return-thunk-mess.patch
+x86-cpu-rename-original-retbleed-methods.patch
+x86-cpu-rename-srso_-.-_alias-to-srso_alias_-1.patch
+x86-cpu-cleanup-the-untrain-mess.patch
+x86-srso-explain-the-untraining-sequences-a-bit-more.patch
+x86-static_call-fix-__static_call_fixup.patch
+x86-retpoline-don-t-clobber-rflags-during-srso_safe_ret.patch
+x86-cpu-amd-fix-the-div-0-initial-fix-attempt.patch
+x86-srso-disable-the-mitigation-on-unaffected-configurations.patch
+x86-retpoline-kprobes-fix-position-of-thunk-sections-with-config_lto_clang.patch
+objtool-x86-fixup-frame-pointer-vs-rethunk.patch
+x86-srso-correct-the-mitigation-status-when-smt-is-disabled.patch
--- /dev/null
+From 095b8303f3835c68ac4a8b6d754ca1c3b6230711 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:30 +0200
+Subject: x86/alternative: Make custom return thunk unconditional
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 095b8303f3835c68ac4a8b6d754ca1c3b6230711 upstream.
+
+There is infrastructure to rewrite return thunks to point to any
+random thunk one desires; unwrap that from CALL_THUNKS, which up to
+now was its sole user.
+
+ [ bp: Make the thunks visible on 32-bit and add ifdeffery for the
+ 32-bit builds. ]
+
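+With that unwrapped, a mitigation selects its thunk by plain pointer
+assignment at boot -- a sketch of what later patches in this series
+do (srso_return_thunk only exists after those patches):
+
+	/* default; rewritten by alternatives to the chosen thunk */
+	void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+
+	/* e.g. in srso_select_mitigation(): */
+	x86_return_thunk = srso_return_thunk;
+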
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.775293785@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 5 +++++
+ arch/x86/kernel/cpu/bugs.c | 2 ++
+ 2 files changed, 7 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -195,7 +195,12 @@
+ _ASM_PTR " 999b\n\t" \
+ ".popsection\n\t"
+
++#ifdef CONFIG_RETHUNK
+ extern void __x86_return_thunk(void);
++#else
++static inline void __x86_return_thunk(void) {}
++#endif
++
+ extern void zen_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_untrain_ret_alias(void);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -61,6 +61,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
++void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
--- /dev/null
+From f58d6fbcb7c848b7f2469be339bc571f2e9d245b Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Fri, 11 Aug 2023 23:38:24 +0200
+Subject: x86/CPU/AMD: Fix the DIV(0) initial fix attempt
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream.
+
+Initially, it was thought that doing an innocuous division in the #DE
+handler would take care to prevent any leaking of old data from the
+divider but by the time the fault is raised, the speculation has already
+advanced too far and such data could already have been used by younger
+operations.
+
+Therefore, do the innocuous division on every exit to userspace so that
+userspace doesn't see any potentially old data from integer divisions in
+kernel space.
+
+Do the same before VMRUN too, to protect host data from leaking into
+the guest.
+
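+For reference, the innocuous division itself is unchanged from the
+original fix; it is the alternative-patched sequence visible in the
+amd_clear_divider() hunk below:
+
+	/* zero dividend, patched in only on X86_BUG_DIV0 parts */
+	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+		     :: "a" (0), "d" (0), "r" (1));
+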
+Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0")
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: <stable@kernel.org>
+Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/entry-common.h | 1 +
+ arch/x86/kernel/cpu/amd.c | 1 +
+ arch/x86/kernel/traps.c | 2 --
+ arch/x86/kvm/svm/svm.c | 1 +
+ 4 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -78,6 +78,7 @@ static inline void arch_exit_to_user_mod
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+ mds_user_clear_cpu_buffers();
++ amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1331,3 +1331,4 @@ void noinstr amd_clear_divider(void)
+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+ :: "a" (0), "d" (0), "r" (1));
+ }
++EXPORT_SYMBOL_GPL(amd_clear_divider);
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -198,8 +198,6 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+ do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ FPE_INTDIV, error_get_trap_addr(regs));
+-
+- amd_clear_divider();
+ }
+
+ DEFINE_IDTENTRY(exc_overflow)
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -3376,6 +3376,7 @@ static void svm_flush_tlb_gva(struct kvm
+
+ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+ {
++ amd_clear_divider();
+ }
+
+ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
--- /dev/null
+From d43490d0ab824023e11d0b57d0aeec17a6e0ca13 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:31 +0200
+Subject: x86/cpu: Clean up SRSO return thunk mess
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit d43490d0ab824023e11d0b57d0aeec17a6e0ca13 upstream.
+
+Use the existing configurable return thunk. There is absolutely no
+justification for having created this __x86_return_thunk alternative.
+
+To clarify, the whole thing looks like:
+
+Zen3/4 does:
+
+ srso_alias_untrain_ret:
+ nop2
+ lfence
+ jmp srso_alias_return_thunk
+ int3
+
+ srso_alias_safe_ret: // aliases srso_alias_untrain_ret just so
+ add $8, %rsp
+ ret
+ int3
+
+ srso_alias_return_thunk:
+ call srso_alias_safe_ret
+ ud2
+
+While Zen1/2 does:
+
+ srso_untrain_ret:
+ movabs $foo, %rax
+ lfence
+ call srso_safe_ret (jmp srso_return_thunk ?)
+ int3
+
+ srso_safe_ret: // embedded in movabs instruction
+ add $8,%rsp
+ ret
+ int3
+
+ srso_return_thunk:
+ call srso_safe_ret
+ ud2
+
+While retbleed does:
+
+ zen_untrain_ret:
+ test $0xcc, %bl
+ lfence
+ jmp zen_return_thunk
+ int3
+
+ zen_return_thunk: // embedded in the test instruction
+ ret
+ int3
+
+Where Zen1/2 flush the BTB entry using the instruction decoder trick
+(test, movabs), Zen3/4 use BTB aliasing. SRSO adds a return sequence
+(srso_safe_ret()) which forces the function return instruction to
+speculate into a trap (UD2). This RET will then mispredict and
+execution will continue at the return site read from the top of the
+stack.
+
+Pick one of three options at boot (every function can only ever return
+once).
+
+ [ bp: Fixup commit message uarch details and add them in a comment in
+ the code too. Add a comment about the srso_select_mitigation()
+ dependency on retbleed_select_mitigation(). Add moar ifdeffery for
+ 32-bit builds. Add a dummy srso_untrain_ret_alias() definition for
+ 32-bit alternatives needing the symbol. ]
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.842775684@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 5 +++
+ arch/x86/kernel/cpu/bugs.c | 17 ++++++++--
+ arch/x86/kernel/vmlinux.lds.S | 2 -
+ arch/x86/lib/retpoline.S | 58 +++++++++++++++++++++++++----------
+ tools/objtool/arch/x86/decode.c | 2 -
+ 5 files changed, 63 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -201,9 +201,14 @@ extern void __x86_return_thunk(void);
+ static inline void __x86_return_thunk(void) {}
+ #endif
+
++extern void zen_return_thunk(void);
++extern void srso_return_thunk(void);
++extern void srso_alias_return_thunk(void);
++
+ extern void zen_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_untrain_ret_alias(void);
++
+ extern void entry_ibpb(void);
+
+ #ifdef CONFIG_RETPOLINE
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -157,8 +157,13 @@ void __init cpu_select_mitigations(void)
+ l1tf_select_mitigation();
+ md_clear_select_mitigation();
+ srbds_select_mitigation();
+- gds_select_mitigation();
++
++ /*
++ * srso_select_mitigation() depends and must run after
++ * retbleed_select_mitigation().
++ */
+ srso_select_mitigation();
++ gds_select_mitigation();
+ }
+
+ /*
+@@ -978,6 +983,9 @@ do_cmd_auto:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
++ if (IS_ENABLED(CONFIG_RETHUNK))
++ x86_return_thunk = zen_return_thunk;
++
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+ pr_err(RETBLEED_UNTRAIN_MSG);
+@@ -2352,10 +2360,13 @@ static void __init srso_select_mitigatio
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+
+- if (boot_cpu_data.x86 == 0x19)
++ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+- else
++ x86_return_thunk = srso_alias_return_thunk;
++ } else {
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
++ x86_return_thunk = srso_return_thunk;
++ }
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -518,7 +518,7 @@ INIT_PER_CPU(irq_stack_backing_store);
+ #endif
+
+ #ifdef CONFIG_RETHUNK
+-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
++. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -93,21 +93,26 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+ .section .text.__x86.rethunk_untrain
+
+ SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++ UNWIND_HINT_FUNC
+ ASM_NOP2
+ lfence
+- jmp __x86_return_thunk
++ jmp srso_alias_return_thunk
+ SYM_FUNC_END(srso_untrain_ret_alias)
+ __EXPORT_THUNK(srso_untrain_ret_alias)
+
+ .section .text.__x86.rethunk_safe
++#else
++/* dummy definition for alternatives */
++SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++ ANNOTATE_UNRET_SAFE
++ ret
++ int3
++SYM_FUNC_END(srso_untrain_ret_alias)
+ #endif
+
+-/* Needs a definition for the __x86_return_thunk alternative below. */
+ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+-#ifdef CONFIG_CPU_SRSO
+ add $8, %_ASM_SP
+ UNWIND_HINT_FUNC
+-#endif
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+@@ -115,9 +120,16 @@ SYM_FUNC_END(srso_safe_ret_alias)
+
+ .section .text.__x86.return_thunk
+
++SYM_CODE_START(srso_alias_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ call srso_safe_ret_alias
++ ud2
++SYM_CODE_END(srso_alias_return_thunk)
++
+ /*
+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
+ * alignment within the BTB.
+ * 2) The instruction at zen_untrain_ret must contain, and not
+ * end with, the 0xc3 byte of the RET.
+@@ -125,7 +137,7 @@ SYM_FUNC_END(srso_safe_ret_alias)
+ * from re-poisioning the BTB prediction.
+ */
+ .align 64
+- .skip 64 - (__ret - zen_untrain_ret), 0xcc
++ .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
+ SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+
+ /*
+@@ -133,16 +145,16 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+ *
+ * TEST $0xcc, %bl
+ * LFENCE
+- * JMP __x86_return_thunk
++ * JMP zen_return_thunk
+ *
+ * Executing the TEST instruction has a side effect of evicting any BTB
+ * prediction (potentially attacker controlled) attached to the RET, as
+- * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
++ * zen_return_thunk + 1 isn't an instruction boundary at the moment.
+ */
+ .byte 0xf6
+
+ /*
+- * As executed from __x86_return_thunk, this is a plain RET.
++ * As executed from zen_return_thunk, this is a plain RET.
+ *
+ * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+ *
+@@ -154,13 +166,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+ * With SMT enabled and STIBP active, a sibling thread cannot poison
+ * RET's prediction to a type of its choice, but can evict the
+ * prediction due to competitive sharing. If the prediction is
+- * evicted, __x86_return_thunk will suffer Straight Line Speculation
++ * evicted, zen_return_thunk will suffer Straight Line Speculation
+ * which will be contained safely by the INT3.
+ */
+-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
++SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
+ ret
+ int3
+-SYM_CODE_END(__ret)
++SYM_CODE_END(zen_return_thunk)
+
+ /*
+ * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -171,7 +183,7 @@ SYM_CODE_END(__ret)
+ * Jump back and execute the RET in the middle of the TEST instruction.
+ * INT3 is for SLS protection.
+ */
+- jmp __ret
++ jmp zen_return_thunk
+ int3
+ SYM_FUNC_END(zen_untrain_ret)
+ __EXPORT_THUNK(zen_untrain_ret)
+@@ -191,12 +203,19 @@ __EXPORT_THUNK(zen_untrain_ret)
+ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ .byte 0x48, 0xb8
+
++/*
++ * This forces the function return instruction to speculate into a trap
++ * (UD2 in srso_return_thunk() below). This RET will then mispredict
++ * and execution will continue at the return site read from the top of
++ * the stack.
++ */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+ add $8, %_ASM_SP
+ ret
+ int3
+ int3
+ int3
++ /* end of movabs */
+ lfence
+ call srso_safe_ret
+ ud2
+@@ -204,12 +223,19 @@ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+
+-SYM_CODE_START(__x86_return_thunk)
++SYM_CODE_START(srso_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+- ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+- "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
++ call srso_safe_ret
+ ud2
++SYM_CODE_END(srso_return_thunk)
++
++SYM_CODE_START(__x86_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ ANNOTATE_UNRET_SAFE
++ ret
++ int3
+ SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
+
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -655,5 +655,5 @@ bool arch_is_rethunk(struct symbol *sym)
+ return !strcmp(sym->name, "__x86_return_thunk") ||
+ !strcmp(sym->name, "srso_untrain_ret") ||
+ !strcmp(sym->name, "srso_safe_ret") ||
+- !strcmp(sym->name, "__ret");
++ !strcmp(sym->name, "zen_return_thunk");
+ }
--- /dev/null
+From e7c25c441e9e0fa75b4c83e0b26306b702cfe90d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:34 +0200
+Subject: x86/cpu: Cleanup the untrain mess
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit e7c25c441e9e0fa75b4c83e0b26306b702cfe90d upstream.
+
+Since there can only be one active return_thunk, there only needs to
+be one (matching) untrain_ret. It fundamentally doesn't make sense to
+allow multiple untrain_ret at the same time.
+
+Fold all three different untrain methods into a single (temporary)
+helper stub.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 12 ++++--------
+ arch/x86/kernel/cpu/bugs.c | 1 +
+ arch/x86/lib/retpoline.S | 7 +++++++
+ 3 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -156,9 +156,9 @@
+ .endm
+
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret"
++#define CALL_UNTRAIN_RET "call entry_untrain_ret"
+ #else
+-#define CALL_ZEN_UNTRAIN_RET ""
++#define CALL_UNTRAIN_RET ""
+ #endif
+
+ /*
+@@ -177,14 +177,9 @@
+ defined(CONFIG_CPU_SRSO)
+ ANNOTATE_UNRET_END
+ ALTERNATIVE_2 "", \
+- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
++ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
+ #endif
+-
+-#ifdef CONFIG_CPU_SRSO
+- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+- "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+-#endif
+ .endm
+
+ #else /* __ASSEMBLY__ */
+@@ -209,6 +204,7 @@ extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_alias_untrain_ret(void);
+
++extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+
+ #ifdef CONFIG_RETPOLINE
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2359,6 +2359,7 @@ static void __init srso_select_mitigatio
+ * like ftrace, static_call, etc.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -230,6 +230,13 @@ SYM_CODE_START(srso_return_thunk)
+ ud2
+ SYM_CODE_END(srso_return_thunk)
+
++SYM_FUNC_START(entry_untrain_ret)
++ ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
++ "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
++ "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++SYM_FUNC_END(entry_untrain_ret)
++__EXPORT_THUNK(entry_untrain_ret)
++
+ SYM_CODE_START(__x86_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
--- /dev/null
+From 77f67119004296a9b2503b377d610e08b08afc2a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:27 +0200
+Subject: x86/cpu: Fix __x86_return_thunk symbol type
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 77f67119004296a9b2503b377d610e08b08afc2a upstream.
+
+Commit
+
+ fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+
+reimplemented __x86_return_thunk with a mix of SYM_FUNC_START and
+SYM_CODE_END; this is not a sane combination.
+
+Since nothing should ever actually 'CALL' this, make it consistently
+CODE.
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.571027074@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/retpoline.S | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -204,7 +204,9 @@ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+
+-SYM_FUNC_START(__x86_return_thunk)
++SYM_CODE_START(__x86_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
+ ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+ int3
-From 21aec79a18bf0bfd55dc28a6f75e40139a009e7b Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From af023ef335f13c8b579298fc432daeef609a9e60 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Aug 2023 13:44:28 +0200
Subject: x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk()
From: Peter Zijlstra <peterz@infradead.org>
-[ Upstream commit af023ef335f13c8b579298fc432daeef609a9e60 ]
+commit af023ef335f13c8b579298fc432daeef609a9e60 upstream.
vmlinux.o: warning: objtool: srso_untrain_ret() falls through to next function __x86_return_skl()
vmlinux.o: warning: objtool: __x86_return_thunk() falls through to next function __x86_return_skl()
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121148.637802730@infradead.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
- arch/x86/lib/retpoline.S | 4 ++--
+ arch/x86/lib/retpoline.S | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
-diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
-index 5f7eed97487ec..a0fa45e8a87cd 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
-@@ -199,7 +199,7 @@ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+@@ -199,7 +199,7 @@ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLO
int3
lfence
call srso_safe_ret
SYM_CODE_END(srso_safe_ret)
SYM_FUNC_END(srso_untrain_ret)
__EXPORT_THUNK(srso_untrain_ret)
-@@ -207,7 +207,7 @@ __EXPORT_THUNK(srso_untrain_ret)
- SYM_FUNC_START(__x86_return_thunk)
+@@ -209,7 +209,7 @@ SYM_CODE_START(__x86_return_thunk)
+ ANNOTATE_NOENDBR
ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
"call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
- int3
SYM_CODE_END(__x86_return_thunk)
EXPORT_SYMBOL(__x86_return_thunk)
---
-2.40.1
-
--- /dev/null
+From d025b7bac07a6e90b6b98b487f88854ad9247c39 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:32 +0200
+Subject: x86/cpu: Rename original retbleed methods
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit d025b7bac07a6e90b6b98b487f88854ad9247c39 upstream.
+
+Rename the original retbleed return thunk and untrain_ret to
+retbleed_return_thunk() and retbleed_untrain_ret().
+
+No functional changes.
+
+Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.909378169@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 8 ++++----
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kernel/vmlinux.lds.S | 2 +-
+ arch/x86/lib/retpoline.S | 30 +++++++++++++++---------------
+ tools/objtool/arch/x86/decode.c | 2 +-
+ tools/objtool/check.c | 2 +-
+ 6 files changed, 23 insertions(+), 23 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -156,7 +156,7 @@
+ .endm
+
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
++#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret"
+ #else
+ #define CALL_ZEN_UNTRAIN_RET ""
+ #endif
+@@ -166,7 +166,7 @@
+ * return thunk isn't mapped into the userspace tables (then again, AMD
+ * typically has NO_MELTDOWN).
+ *
+- * While zen_untrain_ret() doesn't clobber anything but requires stack,
++ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
+ * entry_ibpb() will clobber AX, CX, DX.
+ *
+ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+@@ -201,11 +201,11 @@ extern void __x86_return_thunk(void);
+ static inline void __x86_return_thunk(void) {}
+ #endif
+
+-extern void zen_return_thunk(void);
++extern void retbleed_return_thunk(void);
+ extern void srso_return_thunk(void);
+ extern void srso_alias_return_thunk(void);
+
+-extern void zen_untrain_ret(void);
++extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+ extern void srso_untrain_ret_alias(void);
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -984,7 +984,7 @@ do_cmd_auto:
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+ if (IS_ENABLED(CONFIG_RETHUNK))
+- x86_return_thunk = zen_return_thunk;
++ x86_return_thunk = retbleed_return_thunk;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -518,7 +518,7 @@ INIT_PER_CPU(irq_stack_backing_store);
+ #endif
+
+ #ifdef CONFIG_RETHUNK
+-. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
++. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -129,32 +129,32 @@ SYM_CODE_END(srso_alias_return_thunk)
+
+ /*
+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+ * alignment within the BTB.
+- * 2) The instruction at zen_untrain_ret must contain, and not
++ * 2) The instruction at retbleed_untrain_ret must contain, and not
+ * end with, the 0xc3 byte of the RET.
+ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+ * from re-poisioning the BTB prediction.
+ */
+ .align 64
+- .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
+-SYM_FUNC_START_NOALIGN(zen_untrain_ret);
++ .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
++SYM_FUNC_START_NOALIGN(retbleed_untrain_ret);
+
+ /*
+- * As executed from zen_untrain_ret, this is:
++ * As executed from retbleed_untrain_ret, this is:
+ *
+ * TEST $0xcc, %bl
+ * LFENCE
+- * JMP zen_return_thunk
++ * JMP retbleed_return_thunk
+ *
+ * Executing the TEST instruction has a side effect of evicting any BTB
+ * prediction (potentially attacker controlled) attached to the RET, as
+- * zen_return_thunk + 1 isn't an instruction boundary at the moment.
++ * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
+ */
+ .byte 0xf6
+
+ /*
+- * As executed from zen_return_thunk, this is a plain RET.
++ * As executed from retbleed_return_thunk, this is a plain RET.
+ *
+ * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+ *
+@@ -166,13 +166,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret);
+ * With SMT enabled and STIBP active, a sibling thread cannot poison
+ * RET's prediction to a type of its choice, but can evict the
+ * prediction due to competitive sharing. If the prediction is
+- * evicted, zen_return_thunk will suffer Straight Line Speculation
++ * evicted, retbleed_return_thunk will suffer Straight Line Speculation
+ * which will be contained safely by the INT3.
+ */
+-SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
++SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
+ ret
+ int3
+-SYM_CODE_END(zen_return_thunk)
++SYM_CODE_END(retbleed_return_thunk)
+
+ /*
+ * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -183,13 +183,13 @@ SYM_CODE_END(zen_return_thunk)
+ * Jump back and execute the RET in the middle of the TEST instruction.
+ * INT3 is for SLS protection.
+ */
+- jmp zen_return_thunk
++ jmp retbleed_return_thunk
+ int3
+-SYM_FUNC_END(zen_untrain_ret)
+-__EXPORT_THUNK(zen_untrain_ret)
++SYM_FUNC_END(retbleed_untrain_ret)
++__EXPORT_THUNK(retbleed_untrain_ret)
+
+ /*
+- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
++ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccccc308c48348,%rax
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -655,5 +655,5 @@ bool arch_is_rethunk(struct symbol *sym)
+ return !strcmp(sym->name, "__x86_return_thunk") ||
+ !strcmp(sym->name, "srso_untrain_ret") ||
+ !strcmp(sym->name, "srso_safe_ret") ||
+- !strcmp(sym->name, "zen_return_thunk");
++ !strcmp(sym->name, "retbleed_return_thunk");
+ }
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -1165,7 +1165,7 @@ static int add_jump_destinations(struct
+ continue;
+
+ /*
+- * This is a special case for zen_untrain_ret().
++ * This is a special case for retbleed_untrain_ret().
+ * It jumps to __x86_return_thunk(), but objtool
+ * can't find the thunk's starting RET
+ * instruction, because the RET is also in the
--- /dev/null
+From 42be649dd1f2eee6b1fb185f1a231b9494cf095f Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Aug 2023 13:44:33 +0200
+Subject: x86/cpu: Rename srso_(.*)_alias to srso_alias_\1
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 42be649dd1f2eee6b1fb185f1a231b9494cf095f upstream.
+
+For a more consistent namespace.
+
+ [ bp: Fixup names in the doc too. ]
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814121148.976236447@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/srso.rst | 4 ++--
+ arch/x86/include/asm/nospec-branch.h | 4 ++--
+ arch/x86/kernel/vmlinux.lds.S | 8 ++++----
+ arch/x86/lib/retpoline.S | 26 +++++++++++++-------------
+ 4 files changed, 21 insertions(+), 21 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -124,8 +124,8 @@ sequence.
+ To ensure the safety of this mitigation, the kernel must ensure that the
+ safe return sequence is itself free from attacker interference. In Zen3
+ and Zen4, this is accomplished by creating a BTB alias between the
+-untraining function srso_untrain_ret_alias() and the safe return
+-function srso_safe_ret_alias() which results in evicting a potentially
++untraining function srso_alias_untrain_ret() and the safe return
++function srso_alias_safe_ret() which results in evicting a potentially
+ poisoned BTB entry and using that safe one for all function returns.
+
+ In older Zen1 and Zen2, this is accomplished using a reinterpretation
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -183,7 +183,7 @@
+
+ #ifdef CONFIG_CPU_SRSO
+ ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+- "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
++ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+ #endif
+ .endm
+
+@@ -207,7 +207,7 @@ extern void srso_alias_return_thunk(void
+
+ extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+-extern void srso_untrain_ret_alias(void);
++extern void srso_alias_untrain_ret(void);
+
+ extern void entry_ibpb(void);
+
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -141,10 +141,10 @@ SECTIONS
+
+ #ifdef CONFIG_CPU_SRSO
+ /*
+- * See the comment above srso_untrain_ret_alias()'s
++ * See the comment above srso_alias_untrain_ret()'s
+ * definition.
+ */
+- . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
++ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+ *(.text.__x86.rethunk_safe)
+ #endif
+ ALIGN_ENTRY_TEXT_END
+@@ -527,8 +527,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+ * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR
+ * of the two function addresses:
+ */
+-. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) -
+- (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
++ (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ "SRSO function pair won't alias");
+ #endif
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -75,55 +75,55 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+ #ifdef CONFIG_RETHUNK
+
+ /*
+- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
++ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+ * special addresses:
+ *
+- * - srso_untrain_ret_alias() is 2M aligned
+- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
++ * - srso_alias_untrain_ret() is 2M aligned
++ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+ * and 20 in its virtual address are set (while those bits in the
+- * srso_untrain_ret_alias() function are cleared).
++ * srso_alias_untrain_ret() function are cleared).
+ *
+ * This guarantees that those two addresses will alias in the branch
+ * target buffer of Zen3/4 generations, leading to any potential
+ * poisoned entries at that BTB slot to get evicted.
+ *
+- * As a result, srso_safe_ret_alias() becomes a safe return.
++ * As a result, srso_alias_safe_ret() becomes a safe return.
+ */
+ #ifdef CONFIG_CPU_SRSO
+ .section .text.__x86.rethunk_untrain
+
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ UNWIND_HINT_FUNC
+ ASM_NOP2
+ lfence
+ jmp srso_alias_return_thunk
+-SYM_FUNC_END(srso_untrain_ret_alias)
+-__EXPORT_THUNK(srso_untrain_ret_alias)
++SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
+
+ .section .text.__x86.rethunk_safe
+ #else
+ /* dummy definition for alternatives */
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+-SYM_FUNC_END(srso_untrain_ret_alias)
++SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+
+-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ add $8, %_ASM_SP
+ UNWIND_HINT_FUNC
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+-SYM_FUNC_END(srso_safe_ret_alias)
++SYM_FUNC_END(srso_alias_safe_ret)
+
+ .section .text.__x86.return_thunk
+
+ SYM_CODE_START(srso_alias_return_thunk)
+ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+- call srso_safe_ret_alias
++ call srso_alias_safe_ret
+ ud2
+ SYM_CODE_END(srso_alias_return_thunk)
+
--- /dev/null
+From f3c95380a42cc13f671c4325103c22379c468342 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Mar 2022 16:30:18 +0100
+Subject: x86/ibt: Add ANNOTATE_NOENDBR
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit c8c301abeae58ec756b8fcb2178a632bd3c9e284 ]
+
+In order to have objtool warn about code references to !ENDBR
+instructions, we need an annotation to allow this for non-control-flow
+instances -- consider text range checks, text patching, or return
+trampolines etc.
+
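+A usage sketch (hypothetical code): the annotation marks an address
+that is referenced -- by text patching, range checks and the like --
+but never used as an indirect branch target, so objtool will not
+demand an ENDBR there. In C, the string form is pasted into inline
+asm at that spot:
+
+	asm volatile(ANNOTATE_NOENDBR	/* referenced, never jumped to */
+		     "nop");
+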
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/20220308154317.578968224@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/objtool.h | 16 ++++++++++++++++
+ tools/include/linux/objtool.h | 16 ++++++++++++++++
+ 2 files changed, 32 insertions(+)
+
+--- a/include/linux/objtool.h
++++ b/include/linux/objtool.h
+@@ -82,6 +82,12 @@ struct unwind_hint {
+ #define STACK_FRAME_NON_STANDARD_FP(func)
+ #endif
+
++#define ANNOTATE_NOENDBR \
++ "986: \n\t" \
++ ".pushsection .discard.noendbr\n\t" \
++ _ASM_PTR " 986b\n\t" \
++ ".popsection\n\t"
++
+ #else /* __ASSEMBLY__ */
+
+ /*
+@@ -128,6 +134,13 @@ struct unwind_hint {
+ .popsection
+ .endm
+
++.macro ANNOTATE_NOENDBR
++.Lhere_\@:
++ .pushsection .discard.noendbr
++ .quad .Lhere_\@
++ .popsection
++.endm
++
+ #endif /* __ASSEMBLY__ */
+
+ #else /* !CONFIG_STACK_VALIDATION */
+@@ -138,10 +151,13 @@ struct unwind_hint {
+ "\n\t"
+ #define STACK_FRAME_NON_STANDARD(func)
+ #define STACK_FRAME_NON_STANDARD_FP(func)
++#define ANNOTATE_NOENDBR
+ #else
+ #define ANNOTATE_INTRA_FUNCTION_CALL
+ .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+ .endm
++.macro ANNOTATE_NOENDBR
++.endm
+ #endif
+
+ #endif /* CONFIG_STACK_VALIDATION */
+--- a/tools/include/linux/objtool.h
++++ b/tools/include/linux/objtool.h
+@@ -82,6 +82,12 @@ struct unwind_hint {
+ #define STACK_FRAME_NON_STANDARD_FP(func)
+ #endif
+
++#define ANNOTATE_NOENDBR \
++ "986: \n\t" \
++ ".pushsection .discard.noendbr\n\t" \
++ _ASM_PTR " 986b\n\t" \
++ ".popsection\n\t"
++
+ #else /* __ASSEMBLY__ */
+
+ /*
+@@ -128,6 +134,13 @@ struct unwind_hint {
+ .popsection
+ .endm
+
++.macro ANNOTATE_NOENDBR
++.Lhere_\@:
++ .pushsection .discard.noendbr
++ .quad .Lhere_\@
++ .popsection
++.endm
++
+ #endif /* __ASSEMBLY__ */
+
+ #else /* !CONFIG_STACK_VALIDATION */
+@@ -138,10 +151,13 @@ struct unwind_hint {
+ "\n\t"
+ #define STACK_FRAME_NON_STANDARD(func)
+ #define STACK_FRAME_NON_STANDARD_FP(func)
++#define ANNOTATE_NOENDBR
+ #else
+ #define ANNOTATE_INTRA_FUNCTION_CALL
+ .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0
+ .endm
++.macro ANNOTATE_NOENDBR
++.endm
+ #endif
+
+ #endif /* CONFIG_STACK_VALIDATION */
--- /dev/null
+From ba5ca5e5e6a1d55923e88b4a83da452166f5560e Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Fri, 11 Aug 2023 08:52:55 -0700
+Subject: x86/retpoline: Don't clobber RFLAGS during srso_safe_ret()
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit ba5ca5e5e6a1d55923e88b4a83da452166f5560e upstream.
+
+Use LEA instead of ADD when adjusting %rsp in srso_safe_ret{,_alias}()
+so as to avoid clobbering flags: LEA computes an effective address and
+writes only its destination register, while ADD updates RFLAGS. Drop
+one of the INT3 instructions to account for the LEA consuming one more
+byte than the ADD.
+
+KVM's emulator makes indirect calls into a jump table of sorts, where
+the destination of each call is a small blob of code that performs fast
+emulation by executing the target instruction with fixed operands.
+
+E.g. to emulate ADC, fastop() invokes adcb_al_dl():
+
+ adcb_al_dl:
+ <+0>: adc %dl,%al
+ <+2>: jmp <__x86_return_thunk>
+
+A major motivation for doing fast emulation is to leverage the CPU to
+handle consumption and manipulation of arithmetic flags, i.e. RFLAGS is
+both an input and output to the target of the call. fastop() collects
+the RFLAGS result by pushing RFLAGS onto the stack and popping them back
+into a variable (held in %rdi in this case):
+
+ asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
+
+ <+71>: mov 0xc0(%r8),%rdx
+ <+78>: mov 0x100(%r8),%rcx
+ <+85>: push %rdi
+ <+86>: popf
+ <+87>: call *%rsi
+ <+89>: nop
+ <+90>: nop
+ <+91>: nop
+ <+92>: pushf
+ <+93>: pop %rdi
+
+and then propagating the arithmetic flags into the vCPU's emulator state:
+
+ ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
+
+ <+64>: and $0xfffffffffffff72a,%r9
+ <+94>: and $0x8d5,%edi
+ <+109>: or %rdi,%r9
+ <+122>: mov %r9,0x10(%r8)
+
+The failures can be most easily reproduced by running the "emulator"
+test in KVM-Unit-Tests.
+
+If you're feeling a bit of deja vu, see commit b63f20a778c8
+("x86/retpoline: Don't clobber RFLAGS during CALL_NOSPEC on i386").
+
+In addition, this breaks booting of a clang-compiled guest on
+a gcc-compiled host where the host contains the %rsp-modifying SRSO
+mitigations.
+
+ [ bp: Massage commit message, extend, remove addresses. ]
+
+Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation")
+Closes: https://lore.kernel.org/all/de474347-122d-54cd-eabf-9dcc95ab9eae@amd.com
+Reported-by: Srikanth Aithal <sraithal@amd.com>
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/20230810013334.GA5354@dev-arch.thelio-3990X/
+Link: https://lore.kernel.org/r/20230811155255.250835-1-seanjc@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/retpoline.S | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -111,7 +111,7 @@ SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+
+ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+- add $8, %_ASM_SP
++ lea 8(%_ASM_SP), %_ASM_SP
+ UNWIND_HINT_FUNC
+ ANNOTATE_UNRET_SAFE
+ ret
+@@ -211,7 +211,7 @@ __EXPORT_THUNK(retbleed_untrain_ret)
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+- * movabs $0xccccccc308c48348,%rax
++ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+@@ -229,11 +229,10 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL
+ * the stack.
+ */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+- add $8, %_ASM_SP
++ lea 8(%_ASM_SP), %_ASM_SP
+ ret
+ int3
+ int3
+- int3
+ /* end of movabs */
+ lfence
+ call srso_safe_ret
--- /dev/null
+From 79cd2a11224eab86d6673fe8a11d2046ae9d2757 Mon Sep 17 00:00:00 2001
+From: Petr Pavlu <petr.pavlu@suse.com>
+Date: Tue, 11 Jul 2023 11:19:51 +0200
+Subject: x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+commit 79cd2a11224eab86d6673fe8a11d2046ae9d2757 upstream.
+
+The linker script arch/x86/kernel/vmlinux.lds.S matches the thunk
+sections ".text.__x86.*" from arch/x86/lib/retpoline.S as follows:
+
+ .text {
+ [...]
+ TEXT_TEXT
+ [...]
+ __indirect_thunk_start = .;
+ *(.text.__x86.*)
+ __indirect_thunk_end = .;
+ [...]
+ }
+
+Macro TEXT_TEXT references TEXT_MAIN which normally expands to only
+".text". However, with CONFIG_LTO_CLANG, TEXT_MAIN becomes
+".text .text.[0-9a-zA-Z_]*", which also wrongly matches the thunk
+sections. The output layout is then different than expected. For
+instance, the currently defined range [__indirect_thunk_start,
+__indirect_thunk_end] becomes empty.
+
+Prevent the problem by using ".." as the first separator, for example,
+".text..__x86.indirect_thunk": the second '.' is not in TEXT_MAIN's
+"[0-9a-zA-Z_]*" character class, so the thunk sections can no longer
+be swallowed by the generic .text match. This pattern is utilized by
+other explicit section names which start with one of the standard
+prefixes, such as ".text" or ".data", and that need to be individually
+selected in the linker script.
+
+ [ nathan: Fix conflicts with SRSO and fold in a fix for an issue
+   brought up by Andrew Cooper in post-review:
+ https://lore.kernel.org/20230803230323.1478869-1-andrew.cooper3@citrix.com ]
+
+Fixes: dc5723b02e52 ("kbuild: add support for Clang LTO")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230711091952.27944-2-petr.pavlu@suse.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/vmlinux.lds.S | 8 ++++----
+ arch/x86/lib/retpoline.S | 8 ++++----
+ tools/objtool/check.c | 2 +-
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -134,7 +134,7 @@ SECTIONS
+ KPROBES_TEXT
+ ALIGN_ENTRY_TEXT_BEGIN
+ #ifdef CONFIG_CPU_SRSO
+- *(.text.__x86.rethunk_untrain)
++ *(.text..__x86.rethunk_untrain)
+ #endif
+
+ ENTRY_TEXT
+@@ -145,7 +145,7 @@ SECTIONS
+ * definition.
+ */
+ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+- *(.text.__x86.rethunk_safe)
++ *(.text..__x86.rethunk_safe)
+ #endif
+ ALIGN_ENTRY_TEXT_END
+ SOFTIRQENTRY_TEXT
+@@ -155,8 +155,8 @@ SECTIONS
+
+ #ifdef CONFIG_RETPOLINE
+ __indirect_thunk_start = .;
+- *(.text.__x86.indirect_thunk)
+- *(.text.__x86.return_thunk)
++ *(.text..__x86.indirect_thunk)
++ *(.text..__x86.return_thunk)
+ __indirect_thunk_end = .;
+ #endif
+ } :text =0xcccc
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -11,7 +11,7 @@
+ #include <asm/frame.h>
+ #include <asm/nops.h>
+
+- .section .text.__x86.indirect_thunk
++ .section .text..__x86.indirect_thunk
+
+ .macro RETPOLINE reg
+ ANNOTATE_INTRA_FUNCTION_CALL
+@@ -90,7 +90,7 @@ SYM_CODE_END(__x86_indirect_thunk_array)
+ * As a result, srso_alias_safe_ret() becomes a safe return.
+ */
+ #ifdef CONFIG_CPU_SRSO
+- .section .text.__x86.rethunk_untrain
++ .section .text..__x86.rethunk_untrain
+
+ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ UNWIND_HINT_FUNC
+@@ -100,7 +100,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_
+ SYM_FUNC_END(srso_alias_untrain_ret)
+ __EXPORT_THUNK(srso_alias_untrain_ret)
+
+- .section .text.__x86.rethunk_safe
++ .section .text..__x86.rethunk_safe
+ #else
+ /* dummy definition for alternatives */
+ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+@@ -118,7 +118,7 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLO
+ int3
+ SYM_FUNC_END(srso_alias_safe_ret)
+
+- .section .text.__x86.return_thunk
++ .section .text..__x86.return_thunk
+
+ SYM_CODE_START(srso_alias_return_thunk)
+ UNWIND_HINT_FUNC
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -369,7 +369,7 @@ static int decode_instructions(struct ob
+
+ if (!strcmp(sec->name, ".noinstr.text") ||
+ !strcmp(sec->name, ".entry.text") ||
+- !strncmp(sec->name, ".text.__x86.", 12))
++ !strncmp(sec->name, ".text..__x86.", 13))
+ sec->noinstr = true;
+
+ for (offset = 0; offset < sec->len; offset += insn->len) {
-From 64b84382738484038b4f27175158e1dff2f3044f Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
Date: Tue, 15 Aug 2023 11:53:13 +0200
Subject: x86/srso: Correct the mitigation status when SMT is disabled
From: Borislav Petkov (AMD) <bp@alien8.de>
-[ Upstream commit 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 ]
+commit 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 upstream.
Specify how SRSO is mitigated when SMT is disabled. Also, correct the
SMT check for that.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lore.kernel.org/r/20230814200813.p5czl47zssuej7nv@treble
-Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
- arch/x86/kernel/cpu/bugs.c | 5 ++---
+ arch/x86/kernel/cpu/bugs.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index 876fb0d24602d..ac6c7a7b4fcae 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -2318,8 +2318,7 @@ static void __init srso_select_mitigation(void)
+@@ -2328,8 +2328,7 @@ static void __init srso_select_mitigatio
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
*/
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
return;
}
-@@ -2605,7 +2604,7 @@ static ssize_t gds_show_state(char *buf)
+@@ -2619,7 +2618,7 @@ static ssize_t gds_show_state(char *buf)
static ssize_t srso_show_state(char *buf)
{
if (boot_cpu_has(X86_FEATURE_SRSO_NO))
return sysfs_emit(buf, "%s%s\n",
srso_strings[srso_mitigation],
---
-2.40.1
-
-From c3c3fa15ae8db4a256697757311a78c6116fd4d8 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
Date: Sun, 13 Aug 2023 12:39:34 +0200
Subject: x86/srso: Disable the mitigation on unaffected configurations
From: Borislav Petkov (AMD) <bp@alien8.de>
-[ Upstream commit e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 ]
+commit e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 upstream.
Skip the srso cmd line parsing which is not needed on Zen1/2 with SMT
disabled and with the proper microcode applied (latter should be the
case anyway) as those are not affected.
Fixes: 5a15d8348881 ("x86/srso: Tie SBPB bit setting to microcode patch detection")
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230813104517.3346-1-bp@alien8.de
-Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
- arch/x86/kernel/cpu/bugs.c | 7 ++++++-
+ arch/x86/kernel/cpu/bugs.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
-diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
-index d31639e3ce282..876fb0d24602d 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
-@@ -2319,8 +2319,10 @@ static void __init srso_select_mitigation(void)
+@@ -2329,8 +2329,10 @@ static void __init srso_select_mitigatio
* IBPB microcode has been applied.
*/
if ((boot_cpu_data.x86 < 0x19) &&
}
if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-@@ -2602,6 +2604,9 @@ static ssize_t gds_show_state(char *buf)
+@@ -2616,6 +2618,9 @@ static ssize_t gds_show_state(char *buf)
static ssize_t srso_show_state(char *buf)
{
return sysfs_emit(buf, "%s%s\n",
srso_strings[srso_mitigation],
(cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
---
-2.40.1
-
--- /dev/null
+From 9dbd23e42ff0b10c9b02c9e649c76e5228241a8e Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Mon, 14 Aug 2023 21:29:50 +0200
+Subject: x86/srso: Explain the untraining sequences a bit more
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 9dbd23e42ff0b10c9b02c9e649c76e5228241a8e upstream.
+
+The goal is to eventually have a proper documentation about all this.
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20230814164447.GFZNpZ/64H4lENIe94@fat_crate.local
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/retpoline.S | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -128,6 +128,25 @@ SYM_CODE_START(srso_alias_return_thunk)
+ SYM_CODE_END(srso_alias_return_thunk)
+
+ /*
++ * Some generic notes on the untraining sequences:
++ *
++ * They are interchangeable when it comes to flushing potentially wrong
++ * RET predictions from the BTB.
++ *
++ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
++ * Retbleed sequence because the return sequence done there
++ * (srso_safe_ret()) is longer and the return sequence must fully nest
++ * (end before) the untraining sequence. Therefore, the untraining
++ * sequence must fully overlap the return sequence.
++ *
++ * Regarding alignment - the instructions which need to be untrained,
++ * must all start at a cacheline boundary for Zen1/2 generations. That
++ * is, instruction sequences starting at srso_safe_ret() and
++ * the respective instruction sequences at retbleed_return_thunk()
++ * must start at a cacheline boundary.
++ */
++
++/*
+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+ * alignment within the BTB.
-From a680d27b6225238d25e06d9f5713dc7f6757b790 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From 54097309620ef0dc2d7083783dc521c6a5fef957 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 16 Aug 2023 12:44:19 +0200
Subject: x86/static_call: Fix __static_call_fixup()
From: Peter Zijlstra <peterz@infradead.org>
-[ Upstream commit 54097309620ef0dc2d7083783dc521c6a5fef957 ]
+commit 54097309620ef0dc2d7083783dc521c6a5fef957 upstream.
Christian reported spurious module load crashes after some of Song's
module memory layout patches.
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
Link: https://lkml.kernel.org/r/20230816104419.GA982867@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
- arch/x86/kernel/static_call.c | 13 +++++++++++++
+ arch/x86/kernel/static_call.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
-diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
-index 2973b3fb0ec1a..759b986b7f033 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
-@@ -123,6 +123,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+@@ -123,6 +123,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_trans
*/
bool __static_call_fixup(void *tramp, u8 op, void *dest)
{
if (memcmp(tramp+5, tramp_ud, 3)) {
/* Not a trampoline site, not our problem. */
return false;
---
-2.40.1
-