--- /dev/null
+From 353f7988dd8413c47718f7ca79c030b6fb62cfe5 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Tue, 19 Jul 2022 11:09:01 -0700
+Subject: watchqueue: make sure to serialize 'wqueue->defunct' properly
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 353f7988dd8413c47718f7ca79c030b6fb62cfe5 upstream.
+
+When the pipe is closed, we mark the associated watchqueue defunct by
+calling watch_queue_clear(). However, while that is protected by the
+watchqueue lock, new watchqueue entries aren't actually added under that
+lock at all: they use the pipe->rd_wait.lock instead, and looking up
+that pipe happens without any locking.
+
+The watchqueue code uses an RCU read-side critical section to make
+sure that the wqueue entry itself hasn't disappeared, but that does
+not protect the pipe_info in any way.
+
+So make sure to actually hold the wqueue lock when posting watch events,
+properly serializing against the pipe being torn down.
+
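+As a sketch, the resulting pattern for posting a notification becomes
+(illustrative, mirroring the hunks below):
+
+	rcu_read_lock();
+	wqueue = rcu_dereference(watch->queue);
+	if (lock_wqueue(wqueue)) {
+		/* wqueue->lock held, so the pipe can't be torn down under us */
+		post_one_notification(wqueue, n);
+		unlock_wqueue(wqueue);
+	}
+	rcu_read_unlock();
+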
+Reported-by: Noam Rathaus <noamr@ssd-disclosure.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/watch_queue.c | 53 +++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 37 insertions(+), 16 deletions(-)
+
+--- a/kernel/watch_queue.c
++++ b/kernel/watch_queue.c
+@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL");
+ #define WATCH_QUEUE_NOTE_SIZE 128
+ #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
+
++/*
++ * This must be called under the RCU read-lock, which makes
++ * sure that the wqueue still exists. It can then take the lock,
++ * and check that the wqueue hasn't been destroyed, which in
++ * turn makes sure that the notification pipe still exists.
++ */
++static inline bool lock_wqueue(struct watch_queue *wqueue)
++{
++	spin_lock_bh(&wqueue->lock);
++	if (unlikely(wqueue->defunct)) {
++		spin_unlock_bh(&wqueue->lock);
++		return false;
++	}
++	return true;
++}
++
++static inline void unlock_wqueue(struct watch_queue *wqueue)
++{
++	spin_unlock_bh(&wqueue->lock);
++}
++
+ static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
+ 					 struct pipe_buffer *buf)
+ {
+@@ -69,6 +90,10 @@ static const struct pipe_buf_operations
+
+ /*
+ * Post a notification to a watch queue.
++ *
++ * Must be called with the RCU lock for reading, and the
++ * watch_queue lock held, which guarantees that the pipe
++ * hasn't been released.
+ */
+ static bool post_one_notification(struct watch_queue *wqueue,
+ 				  struct watch_notification *n)
+@@ -85,9 +110,6 @@ static bool post_one_notification(struct
+
+ 	spin_lock_irq(&pipe->rd_wait.lock);
+
+-	if (wqueue->defunct)
+-		goto out;
+-
+ 	mask = pipe->ring_size - 1;
+ 	head = pipe->head;
+ 	tail = pipe->tail;
+@@ -203,7 +225,10 @@ void __post_watch_notification(struct wa
+ 		if (security_post_notification(watch->cred, cred, n) < 0)
+ 			continue;
+
+-		post_one_notification(wqueue, n);
++		if (lock_wqueue(wqueue)) {
++			post_one_notification(wqueue, n);
++			unlock_wqueue(wqueue);
++		}
+ 	}
+
+ 	rcu_read_unlock();
+@@ -465,11 +490,12 @@ int add_watch_to_object(struct watch *wa
+ 		return -EAGAIN;
+ 	}
+
+-	spin_lock_bh(&wqueue->lock);
+-	kref_get(&wqueue->usage);
+-	kref_get(&watch->usage);
+-	hlist_add_head(&watch->queue_node, &wqueue->watches);
+-	spin_unlock_bh(&wqueue->lock);
++	if (lock_wqueue(wqueue)) {
++		kref_get(&wqueue->usage);
++		kref_get(&watch->usage);
++		hlist_add_head(&watch->queue_node, &wqueue->watches);
++		unlock_wqueue(wqueue);
++	}
+
+ 	hlist_add_head(&watch->list_node, &wlist->watchers);
+ 	return 0;
+@@ -523,20 +549,15 @@ found:
+
+ 	wqueue = rcu_dereference(watch->queue);
+
+-	/* We don't need the watch list lock for the next bit as RCU is
+-	 * protecting *wqueue from deallocation.
+-	 */
+-	if (wqueue) {
++	if (lock_wqueue(wqueue)) {
+ 		post_one_notification(wqueue, &n.watch);
+
+-		spin_lock_bh(&wqueue->lock);
+-
+ 		if (!hlist_unhashed(&watch->queue_node)) {
+ 			hlist_del_init_rcu(&watch->queue_node);
+ 			put_watch(watch);
+ 		}
+
+-		spin_unlock_bh(&wqueue->lock);
++		unlock_wqueue(wqueue);
+ 	}
+
+ 	if (wlist->release_watch) {
--- /dev/null
+From 65cdf0d623bedf0e069bb64ed52e8bb20105e2ba Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Wed, 13 Jul 2022 14:38:19 -0700
+Subject: x86/alternative: Report missing return thunk details
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 65cdf0d623bedf0e069bb64ed52e8bb20105e2ba upstream.
+
+Debugging missing return thunks is easier if we can see where they're
+happening.
+
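+With the "%pS-%pS: %*ph" format, the WARN now reports the patch site
+and the branch target as symbols, followed by the first five opcode
+bytes at the site, rather than a bare WARN_ON_ONCE() with no context.
+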
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/lkml/Ys66hwtFcGbYmoiZ@hirez.programming.kicks-ass.net/
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/alternative.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -709,7 +709,9 @@ void __init_or_module noinline apply_ret
+ 		dest = addr + insn.length + insn.immediate.value;
+
+ 		if (__static_call_fixup(addr, op, dest) ||
+-		    WARN_ON_ONCE(dest != &__x86_return_thunk))
++		    WARN_ONCE(dest != &__x86_return_thunk,
++			      "missing return thunk: %pS-%pS: %*ph",
++			      addr, dest, 5, addr))
+ 			continue;
+
+ 		DPRINTK("return thunk at: %pS (%px) len: %d to: %pS",
--- /dev/null
+From 28a99e95f55c61855983d36a88c05c178d966bb7 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 18 Jul 2022 13:41:37 +0200
+Subject: x86/amd: Use IBPB for firmware calls
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 28a99e95f55c61855983d36a88c05c178d966bb7 upstream.
+
+On AMD IBRS does not prevent Retbleed; as such use IBPB before a
+firmware call to flush the branch history state.
+
+And because doing an EFI call maps a whole lot of the kernel page
+table into the EFI page table, also do an IBPB beforehand, just in
+case, to keep a poisoned BTB from steering an EFI call through an
+unprotected RET there.
+
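+Illustratively, a protected firmware call is then bracketed like this
+(a sketch of the macro's effect, not its literal expansion):
+
+	firmware_restrict_branch_speculation_start();
+		/* sets IBRS in SPEC_CTRL (X86_FEATURE_USE_IBRS_FW), and/or */
+		/* issues an IBPB via PRED_CMD (X86_FEATURE_USE_IBPB_FW) */
+	/* ... EFI call ... */
+	firmware_restrict_branch_speculation_end();
+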
+ [ bp: Massage. ]
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lore.kernel.org/r/20220715194550.793957-1-cascardo@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/include/asm/nospec-branch.h | 2 ++
+ arch/x86/kernel/cpu/bugs.c | 11 ++++++++++-
+ 3 files changed, 13 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -298,6 +298,7 @@
+ #define X86_FEATURE_RETPOLINE_LFENCE	(11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+ #define X86_FEATURE_RETHUNK		(11*32+14) /* "" Use REturn THUNK */
+ #define X86_FEATURE_UNRET		(11*32+15) /* "" AMD BTB untrain return */
++#define X86_FEATURE_USE_IBPB_FW		(11*32+16) /* "" Use IBPB during runtime firmware calls */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+ #define X86_FEATURE_AVX512_BF16		(12*32+ 5) /* AVX512 BFLOAT16 instructions */
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -298,6 +298,8 @@ do { \
+ 	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
+ 			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
+ 			      X86_FEATURE_USE_IBRS_FW);			\
++	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
++			      X86_FEATURE_USE_IBPB_FW);			\
+ } while (0)
+
+ #define firmware_restrict_branch_speculation_end() \
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1475,7 +1475,16 @@ static void __init spectre_v2_select_mit
+ 	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
+ 	 * enable IBRS around firmware calls.
+ 	 */
+-	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
++	if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
++	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
++	     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
++
++		if (retbleed_cmd != RETBLEED_CMD_IBPB) {
++			setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
++			pr_info("Enabling Speculation Barrier for firmware calls\n");
++		}
++
++	} else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
+ 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ 		pr_info("Enabling Restricted Speculation for firmware calls\n");
+ 	}