--- /dev/null
+From b642b52d0b50f4d398cb4293f64992d0eed2e2ce Mon Sep 17 00:00:00 2001
+From: Ethan Lien <ethanlien@synology.com>
+Date: Mon, 7 Mar 2022 18:00:04 +0800
+Subject: btrfs: fix qgroup reserve overflow the qgroup limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ethan Lien <ethanlien@synology.com>
+
+commit b642b52d0b50f4d398cb4293f64992d0eed2e2ce upstream.
+
+We use extent_changeset->bytes_changed in qgroup_reserve_data() to record
+how many bytes we set for the EXTENT_QGROUP_RESERVED state. Currently
+bytes_changed is declared as "unsigned int", and it will overflow if we
+try to fallocate a range larger than 4GiB. The result is that we reserve
+fewer bytes than requested and eventually break the qgroup limit.
+
+Unlike regular buffered/direct writes, where we use one changeset for
+each ordered extent (which can never be larger than 256M), fallocate
+uses one changeset for the whole range, so it no longer respects the
+256M per-extent limit. That is what causes the problem.
+
+The following example test script reproduces the problem:
+
+ $ cat qgroup-overflow.sh
+ #!/bin/bash
+
+ DEV=/dev/sdj
+ MNT=/mnt/sdj
+
+ mkfs.btrfs -f $DEV
+ mount $DEV $MNT
+
+ # Set qgroup limit to 2GiB.
+ btrfs quota enable $MNT
+ btrfs qgroup limit 2G $MNT
+
+ # Try to fallocate a 3GiB file. This should fail.
+ echo
+ echo "Try to fallocate a 3GiB file..."
+ fallocate -l 3G $MNT/3G.file
+
+ # Try to fallocate a 5GiB file.
+ echo
+ echo "Try to fallocate a 5GiB file..."
+ fallocate -l 5G $MNT/5G.file
+
+ # Show that the qgroup limit was broken.
+ echo
+ sync
+ btrfs qgroup show -r $MNT
+
+ umount $MNT
+
+When running the test:
+
+ $ ./qgroup-overflow.sh
+ (...)
+
+ Try to fallocate a 3GiB file...
+ fallocate: fallocate failed: Disk quota exceeded
+
+ Try to fallocate a 5GiB file...
+
+ qgroupid rfer excl max_rfer
+ -------- ---- ---- --------
+ 0/5 5.00GiB 5.00GiB 2.00GiB
+
+Since we have no control over how bytes_changed is used, it's better to
+make it a u64.
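+
+As a quick illustration of the truncation (a standalone userspace
+snippet, not kernel code), assigning a 5GiB byte count to a 32-bit
+unsigned int silently drops the high bits:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long long range = 5ULL << 30;	/* 5GiB requested */
+		unsigned int bytes_changed = range;	/* old 32-bit type */
+
+		/* prints "5368709120 truncated to 1073741824" (1GiB) */
+		printf("%llu truncated to %u\n", range, bytes_changed);
+		return 0;
+	}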
+
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: Ethan Lien <ethanlien@synology.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/extent_io.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/extent_io.h
++++ b/fs/btrfs/extent_io.h
+@@ -117,7 +117,7 @@ struct btrfs_bio_ctrl {
+ */
+ struct extent_changeset {
+ /* How many bytes are set/cleared in this operation */
+- unsigned int bytes_changed;
++ u64 bytes_changed;
+
+ /* Changed ranges */
+ struct ulist range_changed;
--- /dev/null
+From 60021bd754c6ca0addc6817994f20290a321d8d6 Mon Sep 17 00:00:00 2001
+From: Kaiwen Hu <kevinhu@synology.com>
+Date: Wed, 23 Mar 2022 15:10:32 +0800
+Subject: btrfs: prevent subvol with swapfile from being deleted
+
+From: Kaiwen Hu <kevinhu@synology.com>
+
+commit 60021bd754c6ca0addc6817994f20290a321d8d6 upstream.
+
+A subvolume with an active swapfile must not be deleted, otherwise it
+would not be possible to deactivate the swapfile.
+
+After the subvolume is deleted, we cannot swapoff the swapfile in the
+deleted subvolume because the path is unreachable. The swapfile then
+stays active and holds references, so the filesystem cannot be
+unmounted.
+
+The test looks like this:
+
+ mkfs.btrfs -f $dev > /dev/null
+ mount $dev $mnt
+
+ btrfs sub create $mnt/subvol
+ touch $mnt/subvol/swapfile
+ chmod 600 $mnt/subvol/swapfile
+ chattr +C $mnt/subvol/swapfile
+ dd if=/dev/zero of=$mnt/subvol/swapfile bs=1K count=4096
+ mkswap $mnt/subvol/swapfile
+ swapon $mnt/subvol/swapfile
+
+ btrfs sub delete $mnt/subvol
+ swapoff $mnt/subvol/swapfile # failed: No such file or directory
+ swapoff --all
+
+ umount $mnt # target is busy.
+
+To prevent the above issue, simply check whether the subvolume contains
+any active swapfile and, if it does, stop the deletion. This matches the
+way the snapshot ioctl already deals with a swapfile.
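+
+Schematically, the fix pairs two checks under the same spinlock (a
+simplified sketch with placeholder helpers, not the exact kernel code):
+
+	/* deletion side: btrfs_delete_subvolume() */
+	spin_lock(&root_item_lock);
+	if (swapfile_count) {			/* active swapfile? */
+		spin_unlock(&root_item_lock);
+		return -EPERM;			/* refuse deletion */
+	}
+	mark_root_dead();	/* sets BTRFS_ROOT_SUBVOL_DEAD */
+	spin_unlock(&root_item_lock);
+
+	/* activation side: btrfs_swap_activate() */
+	spin_lock(&root_item_lock);
+	if (root_is_dead()) {			/* being deleted? */
+		spin_unlock(&root_item_lock);
+		return -EPERM;			/* refuse activation */
+	}
+	swapfile_count++;	/* blocks deletion from now on */
+	spin_unlock(&root_item_lock);
+
+Since both sides run under root_item_lock, either the deletion sees the
+swapfile count or the activation sees the dead flag; no interleaving
+lets both succeed.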
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Robbie Ko <robbieko@synology.com>
+Reviewed-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Kaiwen Hu <kevinhu@synology.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4450,6 +4450,13 @@ int btrfs_delete_subvolume(struct inode
+ dest->root_key.objectid);
+ return -EPERM;
+ }
++ if (atomic_read(&dest->nr_swapfiles)) {
++ spin_unlock(&dest->root_item_lock);
++ btrfs_warn(fs_info,
++ "attempt to delete subvolume %llu with active swapfile",
++ root->root_key.objectid);
++ return -EPERM;
++ }
+ root_flags = btrfs_root_flags(&dest->root_item);
+ btrfs_set_root_flags(&dest->root_item,
+ root_flags | BTRFS_ROOT_SUBVOL_DEAD);
+@@ -10721,8 +10728,23 @@ static int btrfs_swap_activate(struct sw
+ * set. We use this counter to prevent snapshots. We must increment it
+ * before walking the extents because we don't want a concurrent
+ * snapshot to run after we've already checked the extents.
+- */
++ *
++ * It is possible that the subvolume is marked for deletion but not yet
++ * removed. To prevent this race, we check the root status before
++ * activating the swapfile.
++ */
++ spin_lock(&root->root_item_lock);
++ if (btrfs_root_dead(root)) {
++ spin_unlock(&root->root_item_lock);
++
++ btrfs_exclop_finish(fs_info);
++ btrfs_warn(fs_info,
++ "cannot activate swapfile because subvolume %llu is being deleted",
++ root->root_key.objectid);
++ return -EPERM;
++ }
+ atomic_inc(&root->nr_swapfiles);
++ spin_unlock(&root->root_item_lock);
+
+ isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
+
--- /dev/null
+From e677edbcabee849bfdd43f1602bccbecf736a646 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Fri, 8 Apr 2022 11:08:58 -0600
+Subject: io_uring: fix race between timeout flush and removal
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit e677edbcabee849bfdd43f1602bccbecf736a646 upstream.
+
+io_flush_timeouts() assumes the timeout isn't in the process of
+triggering or being removed/canceled, so it unconditionally removes it
+from the timeout list and attempts to cancel it.
+
+Leave it on the list instead and let the normal timeout cancelation take
+care of it. The list head is now initialized at prep time, so deleting an
+entry that was never queued remains safe.
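+
+For context, the fix switches to the standard safe-iteration idiom (a
+generic sketch, not the io_uring code): list_for_each_entry_safe()
+caches the next node, so the loop body may unlink the current one:
+
+	struct item *cur, *tmp;
+
+	list_for_each_entry_safe(cur, tmp, &head, node) {
+		if (expired(cur))		   /* hypothetical predicate */
+			list_del_init(&cur->node); /* tmp already saved */
+	}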
+
+Cc: stable@vger.kernel.org # 5.5+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1538,12 +1538,11 @@ static void io_flush_timeouts(struct io_
+ __must_hold(&ctx->completion_lock)
+ {
+ u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++ struct io_kiocb *req, *tmp;
+
+ spin_lock_irq(&ctx->timeout_lock);
+- while (!list_empty(&ctx->timeout_list)) {
++ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+ u32 events_needed, events_got;
+- struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
+- struct io_kiocb, timeout.list);
+
+ if (io_is_timeout_noseq(req))
+ break;
+@@ -1560,7 +1559,6 @@ static void io_flush_timeouts(struct io_
+ if (events_got < events_needed)
+ break;
+
+- list_del_init(&req->timeout.list);
+ io_kill_timeout(req, 0);
+ }
+ ctx->cq_last_tm_flush = seq;
+@@ -6205,6 +6203,7 @@ static int io_timeout_prep(struct io_kio
+ if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
+ return -EFAULT;
+
++ INIT_LIST_HEAD(&req->timeout.list);
+ data->mode = io_translate_timeout_mode(flags);
+ hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
+
--- /dev/null
+From 0f5e4b83b37a96e3643951588ed7176b9b187c0a Mon Sep 17 00:00:00 2001
+From: Eugene Syromiatnikov <esyr@redhat.com>
+Date: Wed, 6 Apr 2022 13:55:33 +0200
+Subject: io_uring: implement compat handling for IORING_REGISTER_IOWQ_AFF
+
+From: Eugene Syromiatnikov <esyr@redhat.com>
+
+commit 0f5e4b83b37a96e3643951588ed7176b9b187c0a upstream.
+
+Similarly to the way it is done in the mbind syscall: a compat task
+passes the CPU mask as an array of 32-bit words, which has to be folded
+into the kernel's native bitmap layout instead of being byte-copied.
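+
+Conceptually the compat path folds the user's 32-bit words into native
+longs (a simplified sketch with a hypothetical helper, assuming a 64-bit
+kernel and a zero-initialized destination; the real conversion is done
+by compat_get_bitmap()):
+
+	static int get_bitmap32(unsigned long *mask,
+				const u32 __user *umask, size_t nwords)
+	{
+		size_t i;
+
+		for (i = 0; i < nwords; i++) {
+			u32 word;
+
+			if (get_user(word, &umask[i]))
+				return -EFAULT;
+			/* two 32-bit words fill one 64-bit long */
+			mask[i / 2] |= (unsigned long)word << (32 * (i % 2));
+		}
+		return 0;
+	}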
+
+Cc: stable@vger.kernel.org # 5.14
+Fixes: fe76421d1da1dcdb ("io_uring: allow user configurable IO thread CPU affinity")
+Signed-off-by: Eugene Syromiatnikov <esyr@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/io_uring.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -10683,7 +10683,15 @@ static int io_register_iowq_aff(struct i
+ if (len > cpumask_size())
+ len = cpumask_size();
+
+- if (copy_from_user(new_mask, arg, len)) {
++ if (in_compat_syscall()) {
++ ret = compat_get_bitmap(cpumask_bits(new_mask),
++ (const compat_ulong_t __user *)arg,
++ len * 8 /* CHAR_BIT */);
++ } else {
++ ret = copy_from_user(new_mask, arg, len);
++ }
++
++ if (ret) {
+ free_cpumask_var(new_mask);
+ return -EFAULT;
+ }
--- /dev/null
+From e590928de7547454469693da9bc7ffd562e54b7e Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Mon, 28 Mar 2022 08:49:03 -0700
+Subject: perf/x86/intel: Update the FRONTEND MSR mask on Sapphire Rapids
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit e590928de7547454469693da9bc7ffd562e54b7e upstream.
+
+On Sapphire Rapids, the FRONTEND_RETIRED.MS_FLOWS event requires the
+FRONTEND MSR value 0x8. However, the current FRONTEND MSR mask doesn't
+support it.
+
+Update intel_spr_extra_regs[] to support it.
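+
+The old and new masks differ only in bit 3 (value 0x8); a quick check:
+
+	0x7fff17 & 0x8 = 0	/* old mask: FRONTEND value 0x8 rejected */
+	0x7fff1f & 0x8 = 0x8	/* new mask: FRONTEND value 0x8 accepted */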
+
+Fixes: 61b985e3e775 ("perf/x86/intel: Add perf core PMU support for Sapphire Rapids")
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/1648482543-14923-2-git-send-email-kan.liang@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -281,7 +281,7 @@ static struct extra_reg intel_spr_extra_
+ INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+- INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
++ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
+ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
+ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
+ EVENT_EXTRA_END
mm-mempolicy-fix-mpol_new-leak-in-shared_policy_replace.patch
io_uring-don-t-check-req-file-in-io_fsync_prep.patch
io_uring-defer-splice-tee-file-validity-check-until-command-issue.patch
+io_uring-implement-compat-handling-for-ioring_register_iowq_aff.patch
+io_uring-fix-race-between-timeout-flush-and-removal.patch
+x86-pm-save-the-msr-validity-status-at-context-setup.patch
+x86-speculation-restore-speculation-related-msrs-during-s3-resume.patch
+perf-x86-intel-update-the-frontend-msr-mask-on-sapphire-rapids.patch
+btrfs-fix-qgroup-reserve-overflow-the-qgroup-limit.patch
+btrfs-prevent-subvol-with-swapfile-from-being-deleted.patch
+spi-core-add-dma_map_dev-for-__spi_unmap_msg.patch
--- /dev/null
+From 409543cec01a84610029d6440c480c3fdd7214fb Mon Sep 17 00:00:00 2001
+From: Vinod Koul <vkoul@kernel.org>
+Date: Wed, 6 Apr 2022 18:52:38 +0530
+Subject: spi: core: add dma_map_dev for __spi_unmap_msg()
+
+From: Vinod Koul <vkoul@kernel.org>
+
+commit 409543cec01a84610029d6440c480c3fdd7214fb upstream.
+
+Commit b470e10eb43f ("spi: core: add dma_map_dev for dma device") added
+dma_map_dev handling to __spi_map_msg() but missed the unmap routine,
+__spi_unmap_msg(), so add it there now.
+
+Fixes: b470e10eb43f ("spi: core: add dma_map_dev for dma device")
+Cc: stable@vger.kernel.org # v5.14+
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Link: https://lore.kernel.org/r/20220406132238.1029249-1-vkoul@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1072,11 +1072,15 @@ static int __spi_unmap_msg(struct spi_co
+
+ if (ctlr->dma_tx)
+ tx_dev = ctlr->dma_tx->device->dev;
++ else if (ctlr->dma_map_dev)
++ tx_dev = ctlr->dma_map_dev;
+ else
+ tx_dev = ctlr->dev.parent;
+
+ if (ctlr->dma_rx)
+ rx_dev = ctlr->dma_rx->device->dev;
++ else if (ctlr->dma_map_dev)
++ rx_dev = ctlr->dma_map_dev;
+ else
+ rx_dev = ctlr->dev.parent;
+
--- /dev/null
+From 73924ec4d560257004d5b5116b22a3647661e364 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Mon, 4 Apr 2022 17:34:19 -0700
+Subject: x86/pm: Save the MSR validity status at context setup
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit 73924ec4d560257004d5b5116b22a3647661e364 upstream.
+
+The mechanism to save/restore MSRs during S3 suspend/resume checks the
+MSR validity during suspend, and only restores an MSR if it is valid.
+This is not optimal, as an invalid MSR will unnecessarily throw an
+exception on every suspend cycle, and the more invalid MSRs there are,
+the higher the impact will be.
+
+Check and save the MSR validity at setup instead. This ensures that only
+MSRs known to be valid, and thus guaranteed not to throw an exception,
+are accessed during suspend.
+
+Fixes: 7a9c2dd08ead ("x86/pm: Introduce quirk framework to save/restore extra MSR registers around suspend/resume")
+Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/power/cpu.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -40,7 +40,8 @@ static void msr_save_context(struct save
+ struct saved_msr *end = msr + ctxt->saved_msrs.num;
+
+ while (msr < end) {
+- msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
++ if (msr->valid)
++ rdmsrl(msr->info.msr_no, msr->info.reg.q);
+ msr++;
+ }
+ }
+@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *
+ }
+
+ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
++ u64 dummy;
++
+ msr_array[i].info.msr_no = msr_id[j];
+- msr_array[i].valid = false;
++ msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy);
+ msr_array[i].info.reg.q = 0;
+ }
+ saved_msrs->num = total_num;
--- /dev/null
+From e2a1256b17b16f9b9adf1b6fea56819e7b68e463 Mon Sep 17 00:00:00 2001
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Date: Mon, 4 Apr 2022 17:35:45 -0700
+Subject: x86/speculation: Restore speculation related MSRs during S3 resume
+
+From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+
+commit e2a1256b17b16f9b9adf1b6fea56819e7b68e463 upstream.
+
+After resuming from suspend-to-RAM, the MSRs that control the CPU's
+speculative execution behavior are not being restored on the boot CPU.
+
+These MSRs are used to mitigate speculative execution vulnerabilities,
+and not restoring them correctly may leave the CPU vulnerable. The
+secondary CPUs' MSRs are correctly restored at S3 resume by
+identify_secondary_cpu().
+
+During S3 resume, restore these MSRs for the boot CPU as well, as part
+of restoring its processor state.
+
+Fixes: 772439717dbf ("x86/bugs/intel: Set proper CPU features and setup RDS")
+Reported-by: Neelima Krishnan <neelima.krishnan@intel.com>
+Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Tested-by: Neelima Krishnan <neelima.krishnan@intel.com>
+Acked-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/power/cpu.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -503,10 +503,24 @@ static int pm_cpu_check(const struct x86
+ return ret;
+ }
+
++static void pm_save_spec_msr(void)
++{
++ u32 spec_msr_id[] = {
++ MSR_IA32_SPEC_CTRL,
++ MSR_IA32_TSX_CTRL,
++ MSR_TSX_FORCE_ABORT,
++ MSR_IA32_MCU_OPT_CTRL,
++ MSR_AMD64_LS_CFG,
++ };
++
++ msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
++}
++
+ static int pm_check_save_msr(void)
+ {
+ dmi_check_system(msr_save_dmi_table);
+ pm_cpu_check(msr_save_cpu_table);
++ pm_save_spec_msr();
+
+ return 0;
+ }