--- /dev/null
+From 5fba5a571858ce2d787fdaf55814e42725bfa895 Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Tue, 21 Nov 2023 13:38:32 +0000
+Subject: btrfs: fix off-by-one when checking chunk map includes logical address
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit 5fba5a571858ce2d787fdaf55814e42725bfa895 upstream.
+
+At btrfs_get_chunk_map() we get the extent map for the chunk that contains
+the given logical address stored in the 'logical' argument. Then we do
+sanity checks to verify the extent map contains the logical address. One
+of these checks verifies if the extent map covers a range with an end
+offset behind the target logical address - however this check has an
+off-by-one error since it will consider an extent map whose start offset
+plus its length matches the target logical address as inclusive, while
+the fact is that the last byte it covers is behind the target logical
+address (by 1).
+
+So fix this condition by using '<=' rather than '<' when comparing the
+extent map's "start + length" against the target logical address.
+
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/volumes.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -2823,7 +2823,7 @@ static struct extent_map *get_chunk_map(
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (em->start > logical || em->start + em->len < logical) {
++ if (em->start > logical || em->start + em->len <= logical) {
+ btrfs_crit(fs_info,
+ "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
+ logical, length, em->start, em->start + em->len);
--- /dev/null
+From 0ac1d13a55eb37d398b63e6ff6db4a09a2c9128c Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Fri, 24 Nov 2023 17:48:31 +0100
+Subject: btrfs: send: ensure send_fd is writable
+
+From: Jann Horn <jannh@google.com>
+
+commit 0ac1d13a55eb37d398b63e6ff6db4a09a2c9128c upstream.
+
+kernel_write() requires the caller to ensure that the file is writable.
+Let's do that directly after looking up the ->send_fd.
+
+We don't need a separate bailout path because the "out" path already
+does fput() if ->send_filp is non-NULL.
+
+This has no security impact for two reasons:
+
+ - the ioctl requires CAP_SYS_ADMIN
+ - __kernel_write() bails out on read-only files - but only since 5.8,
+ see commit a01ac27be472 ("fs: check FMODE_WRITE in __kernel_write")
+
+Reported-and-tested-by: syzbot+12e098239d20385264d3@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=12e098239d20385264d3
+Fixes: 31db9f7c23fb ("Btrfs: introduce BTRFS_IOC_SEND for btrfs send/receive")
+CC: stable@vger.kernel.org # 4.14+
+Signed-off-by: Jann Horn <jannh@google.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/send.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -6680,7 +6680,7 @@ long btrfs_ioctl_send(struct file *mnt_f
+ sctx->flags = arg->flags;
+
+ sctx->send_filp = fget(arg->send_fd);
+- if (!sctx->send_filp) {
++ if (!sctx->send_filp || !(sctx->send_filp->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ goto out;
+ }
--- /dev/null
+From 5e1d824f9a283cbf90f25241b66d1f69adb3835b Mon Sep 17 00:00:00 2001
+From: Timothy Pearson <tpearson@raptorengineering.com>
+Date: Sun, 19 Nov 2023 09:18:02 -0600
+Subject: powerpc: Don't clobber f0/vs0 during fp|altivec register save
+
+From: Timothy Pearson <tpearson@raptorengineering.com>
+
+commit 5e1d824f9a283cbf90f25241b66d1f69adb3835b upstream.
+
+During floating point and vector save to thread data f0/vs0 are
+clobbered by the FPSCR/VSCR store routine. This has been observed to
+lead to userspace register corruption and application data corruption
+with io-uring.
+
+Fix it by restoring f0/vs0 after FPSCR/VSCR store has completed for
+all the FP, altivec, VMX register save paths.
+
+Tested under QEMU in kvm mode, running on a Talos II workstation with
+dual POWER9 DD2.2 CPUs.
+
+Additional detail (mpe):
+
+Typically save_fpu() is called from __giveup_fpu() which saves the FP
+regs and also *turns off FP* in the task's MSR, meaning the kernel will
+reload the FP regs from the thread struct before letting the task use FP
+again. So in that case save_fpu() is free to clobber f0 because the FP
+regs no longer hold live values for the task.
+
+There is another case though, which is the path via:
+ sys_clone()
+ ...
+ copy_process()
+ dup_task_struct()
+ arch_dup_task_struct()
+ flush_all_to_thread()
+ save_all()
+
+That path saves the FP regs but leaves them live. That's meant as an
+optimisation for a process that's using FP/VSX and then calls fork(),
+leaving the regs live means the parent process doesn't have to take a
+fault after the fork to get its FP regs back. The optimisation was added
+in commit 8792468da5e1 ("powerpc: Add the ability to save FPU without
+giving it up").
+
+That path does clobber f0, but f0 is volatile across function calls,
+and typically programs reach copy_process() from userspace via a syscall
+wrapper function. So in normal usage f0 being clobbered across a
+syscall doesn't cause visible data corruption.
+
+But there is now a new path, because io-uring can call copy_process()
+via create_io_thread() from the signal handling path. That's OK if the
+signal is handled as part of syscall return, but it's not OK if the
+signal is handled due to some other interrupt.
+
+That path is:
+
+interrupt_return_srr_user()
+ interrupt_exit_user_prepare()
+ interrupt_exit_user_prepare_main()
+ do_notify_resume()
+ get_signal()
+ task_work_run()
+ create_worker_cb()
+ create_io_worker()
+ copy_process()
+ dup_task_struct()
+ arch_dup_task_struct()
+ flush_all_to_thread()
+ save_all()
+ if (tsk->thread.regs->msr & MSR_FP)
+ save_fpu()
+ # f0 is clobbered and potentially live in userspace
+
+Note the above discussion applies equally to save_altivec().
+
+Fixes: 8792468da5e1 ("powerpc: Add the ability to save FPU without giving it up")
+Cc: stable@vger.kernel.org # v4.6+
+Closes: https://lore.kernel.org/all/480932026.45576726.1699374859845.JavaMail.zimbra@raptorengineeringinc.com/
+Closes: https://lore.kernel.org/linuxppc-dev/480221078.47953493.1700206777956.JavaMail.zimbra@raptorengineeringinc.com/
+Tested-by: Timothy Pearson <tpearson@raptorengineering.com>
+Tested-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Timothy Pearson <tpearson@raptorengineering.com>
+[mpe: Reword change log to describe exact path of corruption & other minor tweaks]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/1921539696.48534988.1700407082933.JavaMail.zimbra@raptorengineeringinc.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/fpu.S | 13 +++++++++++++
+ arch/powerpc/kernel/vector.S | 2 ++
+ 2 files changed, 15 insertions(+)
+
+--- a/arch/powerpc/kernel/fpu.S
++++ b/arch/powerpc/kernel/fpu.S
+@@ -27,6 +27,15 @@
+ #include <asm/export.h>
+
+ #ifdef CONFIG_VSX
++#define __REST_1FPVSR(n,c,base) \
++BEGIN_FTR_SECTION \
++ b 2f; \
++END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
++ REST_FPR(n,base); \
++ b 3f; \
++2: REST_VSR(n,c,base); \
++3:
++
+ #define __REST_32FPVSRS(n,c,base) \
+ BEGIN_FTR_SECTION \
+ b 2f; \
+@@ -45,9 +54,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);
+ 2: SAVE_32VSRS(n,c,base); \
+ 3:
+ #else
++#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
+ #define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
+ #define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
+ #endif
++#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
+ #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+ #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
+
+@@ -70,6 +81,7 @@ _GLOBAL(store_fp_state)
+ SAVE_32FPVSRS(0, R4, R3)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r3)
++ REST_1FPVSR(0, R4, R3)
+ blr
+ EXPORT_SYMBOL(store_fp_state)
+
+@@ -134,6 +146,7 @@ _GLOBAL(save_fpu)
+ 2: SAVE_32FPVSRS(0, R4, R6)
+ mffs fr0
+ stfd fr0,FPSTATE_FPSCR(r6)
++ REST_1FPVSR(0, R4, R6)
+ blr
+
+ /*
+--- a/arch/powerpc/kernel/vector.S
++++ b/arch/powerpc/kernel/vector.S
+@@ -30,6 +30,7 @@ _GLOBAL(store_vr_state)
+ mfvscr v0
+ li r4, VRSTATE_VSCR
+ stvx v0, r4, r3
++ lvx v0, 0, r3
+ blr
+ EXPORT_SYMBOL(store_vr_state)
+
+@@ -100,6 +101,7 @@ _GLOBAL(save_altivec)
+ mfvscr v0
+ li r4,VRSTATE_VSCR
+ stvx v0,r4,r7
++ lvx v0,0,r7
+ blr
+
+ #ifdef CONFIG_VSX