--- /dev/null
+From df6e44b8f9be7362e755c1f76d734be8236eb6c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Dec 2023 14:20:32 +0100
+Subject: ASoC: cs35l34: Fix GPIO name and drop legacy include
+
+From: Linus Walleij <linus.walleij@linaro.org>
+
+[ Upstream commit a6122b0b4211d132934ef99e7b737910e6d54d2f ]
+
+This driver includes the legacy GPIO APIs <linux/gpio.h> and
+<linux/of_gpio.h> but does not use any symbols from either of
+them.
+
+Drop the includes.
+
+Further, the driver requests "reset-gpios" rather than just
+"reset" from the GPIO framework. This is wrong because the
+gpiolib core appends "-gpios" to the requested name before
+looking it up in e.g. the device tree. Drop the suffix.
+
+The latter problem means that the optional RESET GPIO has
+never been properly retrieved and used even if it existed,
+but nobody noticed.
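+
+For illustration, a minimal sketch of the two lookups (gpiolib
+composes the device tree property name from the passed con_id plus
+the "-gpios" suffix):
+
+	/* Wrong: the core searched for the nonexistent
+	 * "reset-gpios-gpios" DT property, so the optional GPIO
+	 * always came back as NULL: */
+	gpiod = devm_gpiod_get_optional(dev, "reset-gpios", GPIOD_OUT_LOW);
+
+	/* Right: con_id "reset" matches the "reset-gpios" property: */
+	gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);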
+
+Fixes: c1124c09e103 ("ASoC: cs35l34: Initial commit of the cs35l34 CODEC driver.")
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Link: https://lore.kernel.org/r/20231201-descriptors-sound-cirrus-v2-3-ee9f9d4655eb@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs35l34.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c
+index 6974dd4614103..04d9117b31ac7 100644
+--- a/sound/soc/codecs/cs35l34.c
++++ b/sound/soc/codecs/cs35l34.c
+@@ -20,14 +20,12 @@
+ #include <linux/regulator/machine.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/of_device.h>
+-#include <linux/of_gpio.h>
+ #include <linux/of_irq.h>
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+ #include <sound/pcm_params.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+-#include <linux/gpio.h>
+ #include <linux/gpio/consumer.h>
+ #include <sound/initval.h>
+ #include <sound/tlv.h>
+@@ -1061,7 +1059,7 @@ static int cs35l34_i2c_probe(struct i2c_client *i2c_client)
+ dev_err(&i2c_client->dev, "Failed to request IRQ: %d\n", ret);
+
+ cs35l34->reset_gpio = devm_gpiod_get_optional(&i2c_client->dev,
+- "reset-gpios", GPIOD_OUT_LOW);
++ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(cs35l34->reset_gpio)) {
+ ret = PTR_ERR(cs35l34->reset_gpio);
+ goto err_regulator;
+--
+2.43.0
+
--- /dev/null
+From 17bbc515a9f99a98471bbb1d6b17d7eec1e93ed6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Feb 2024 11:56:02 -0500
+Subject: btrfs: fix deadlock with fiemap and extent locking
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+[ Upstream commit b0ad381fa7690244802aed119b478b4bdafc31dd ]
+
+While working on the patchset to remove extent locking, I got a lockdep
+splat with fiemap pagefaulting under my new extent-lock replacement
+lock.
+
+This deadlock exists with our normal code; we just don't have lockdep
+annotations for the extent locking, so we've never noticed it.
+
+Since we're copying the fiemap extent to user space on every iteration,
+we have the chance of pagefaulting. Because we hold the extent lock for
+the entire range, the fault handler could mkwrite into a range of the
+file that we have mmap'ed, and that mkwrite then blocks on the very
+extent lock we hold. This deadlocks with the following stack trace:
+
+[<0>] lock_extent+0x28d/0x2f0
+[<0>] btrfs_page_mkwrite+0x273/0x8a0
+[<0>] do_page_mkwrite+0x50/0xb0
+[<0>] do_fault+0xc1/0x7b0
+[<0>] __handle_mm_fault+0x2fa/0x460
+[<0>] handle_mm_fault+0xa4/0x330
+[<0>] do_user_addr_fault+0x1f4/0x800
+[<0>] exc_page_fault+0x7c/0x1e0
+[<0>] asm_exc_page_fault+0x26/0x30
+[<0>] rep_movs_alternative+0x33/0x70
+[<0>] _copy_to_user+0x49/0x70
+[<0>] fiemap_fill_next_extent+0xc8/0x120
+[<0>] emit_fiemap_extent+0x4d/0xa0
+[<0>] extent_fiemap+0x7f8/0xad0
+[<0>] btrfs_fiemap+0x49/0x80
+[<0>] __x64_sys_ioctl+0x3e1/0xb50
+[<0>] do_syscall_64+0x94/0x1a0
+[<0>] entry_SYSCALL_64_after_hwframe+0x6e/0x76
+
+I wrote an fstest to reproduce this deadlock without my replacement lock
+and verified that the deadlock exists with our existing locking.
+
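+A hedged sketch of such a reproducer (not the actual fstest; the mount
+point and sizes are made up): place the fiemap buffer inside a writable
+mmap of the same file, so the kernel's copy-out pagefaults and
+btrfs_page_mkwrite() blocks on the extent lock that fiemap holds.
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <sys/mman.h>
+	#include <unistd.h>
+	#include <linux/fiemap.h>
+	#include <linux/fs.h>
+
+	int main(void)
+	{
+		long pgsz = sysconf(_SC_PAGESIZE);
+		size_t len = 1 << 20;
+		char buf[4096] = { 0 };
+		int fd = open("/mnt/btrfs/testfile", O_RDWR | O_CREAT, 0644);
+
+		/* Give the file some extents to report. */
+		for (size_t off = 0; off < len; off += sizeof(buf))
+			pwrite(fd, buf, sizeof(buf), off);
+		fsync(fd);
+
+		char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
+				 MAP_SHARED, fd, 0);
+
+		/*
+		 * Put the header at the end of the first page so the
+		 * extent array starts on the next, still-clean page;
+		 * copy_to_user() then write-faults that page mid-fiemap.
+		 */
+		struct fiemap *fm = (void *)(map + pgsz - sizeof(*fm));
+		memset(fm, 0, sizeof(*fm));
+		fm->fm_length = FIEMAP_MAX_OFFSET;
+		fm->fm_extent_count = 64;
+
+		ioctl(fd, FS_IOC_FIEMAP, fm); /* hangs on unpatched kernels */
+		return 0;
+	}
+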
+To fix this, simply don't take the extent lock for the entire duration
+of the fiemap. This is safe in general because we keep track of where
+we are when we're searching the tree, so if an ordered extent updates
+in the middle of our fiemap call we'll still emit the correct extents,
+because we know what offset we were on before.
+
+The only place we maintain the lock is while searching for delalloc.
+Since the delalloc state can change during writeback, we want to lock
+the extent range so we have a consistent view of delalloc at the time
+we check whether we need to set the delalloc flag.
+
+With this patch applied we no longer deadlock with my testcase.
+
+CC: stable@vger.kernel.org # 6.1+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent_io.c | 62 ++++++++++++++++++++++++++++++++------------
+ 1 file changed, 45 insertions(+), 17 deletions(-)
+
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 03c10e0ba0e27..70c6c7a0d4014 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2646,16 +2646,34 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
+ * it beyond i_size.
+ */
+ while (cur_offset < end && cur_offset < i_size) {
++ struct extent_state *cached_state = NULL;
+ u64 delalloc_start;
+ u64 delalloc_end;
+ u64 prealloc_start;
++ u64 lockstart;
++ u64 lockend;
+ u64 prealloc_len = 0;
+ bool delalloc;
+
++ lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
++ lockend = round_up(end, inode->root->fs_info->sectorsize);
++
++ /*
++ * We are only locking for the delalloc range because that's the
++ * only thing that can change here. With fiemap we have a lock
++ * on the inode, so no buffered or direct writes can happen.
++ *
++ * However mmaps and normal page writeback will cause this to
++ * change arbitrarily. We have to lock the extent lock here to
++ * make sure that nobody messes with the tree while we're doing
++ * btrfs_find_delalloc_in_range.
++ */
++ lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
+ delalloc_cached_state,
+ &delalloc_start,
+ &delalloc_end);
++ unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ if (!delalloc)
+ break;
+
+@@ -2823,15 +2841,15 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
+ {
+ const u64 ino = btrfs_ino(inode);
+- struct extent_state *cached_state = NULL;
+ struct extent_state *delalloc_cached_state = NULL;
+ struct btrfs_path *path;
+ struct fiemap_cache cache = { 0 };
+ struct btrfs_backref_share_check_ctx *backref_ctx;
+ u64 last_extent_end;
+ u64 prev_extent_end;
+- u64 lockstart;
+- u64 lockend;
++ u64 range_start;
++ u64 range_end;
++ const u64 sectorsize = inode->root->fs_info->sectorsize;
+ bool stopped = false;
+ int ret;
+
+@@ -2842,12 +2860,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ goto out;
+ }
+
+- lockstart = round_down(start, inode->root->fs_info->sectorsize);
+- lockend = round_up(start + len, inode->root->fs_info->sectorsize);
+- prev_extent_end = lockstart;
++ range_start = round_down(start, sectorsize);
++ range_end = round_up(start + len, sectorsize);
++ prev_extent_end = range_start;
+
+ btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
+- lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+
+ ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
+ if (ret < 0)
+@@ -2855,7 +2872,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ btrfs_release_path(path);
+
+ path->reada = READA_FORWARD;
+- ret = fiemap_search_slot(inode, path, lockstart);
++ ret = fiemap_search_slot(inode, path, range_start);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+@@ -2867,7 +2884,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ goto check_eof_delalloc;
+ }
+
+- while (prev_extent_end < lockend) {
++ while (prev_extent_end < range_end) {
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_file_extent_item *ei;
+ struct btrfs_key key;
+@@ -2890,19 +2907,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ * The first iteration can leave us at an extent item that ends
+ * before our range's start. Move to the next item.
+ */
+- if (extent_end <= lockstart)
++ if (extent_end <= range_start)
+ goto next_item;
+
+ backref_ctx->curr_leaf_bytenr = leaf->start;
+
+ /* We have in implicit hole (NO_HOLES feature enabled). */
+ if (prev_extent_end < key.offset) {
+- const u64 range_end = min(key.offset, lockend) - 1;
++ const u64 hole_end = min(key.offset, range_end) - 1;
+
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state,
+ backref_ctx, 0, 0, 0,
+- prev_extent_end, range_end);
++ prev_extent_end, hole_end);
+ if (ret < 0) {
+ goto out_unlock;
+ } else if (ret > 0) {
+@@ -2912,7 +2929,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ }
+
+ /* We've reached the end of the fiemap range, stop. */
+- if (key.offset >= lockend) {
++ if (key.offset >= range_end) {
+ stopped = true;
+ break;
+ }
+@@ -3006,29 +3023,41 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ btrfs_free_path(path);
+ path = NULL;
+
+- if (!stopped && prev_extent_end < lockend) {
++ if (!stopped && prev_extent_end < range_end) {
+ ret = fiemap_process_hole(inode, fieinfo, &cache,
+ &delalloc_cached_state, backref_ctx,
+- 0, 0, 0, prev_extent_end, lockend - 1);
++ 0, 0, 0, prev_extent_end, range_end - 1);
+ if (ret < 0)
+ goto out_unlock;
+- prev_extent_end = lockend;
++ prev_extent_end = range_end;
+ }
+
+ if (cache.cached && cache.offset + cache.len >= last_extent_end) {
+ const u64 i_size = i_size_read(&inode->vfs_inode);
+
+ if (prev_extent_end < i_size) {
++ struct extent_state *cached_state = NULL;
+ u64 delalloc_start;
+ u64 delalloc_end;
++ u64 lockstart;
++ u64 lockend;
+ bool delalloc;
+
++ lockstart = round_down(prev_extent_end, sectorsize);
++ lockend = round_up(i_size, sectorsize);
++
++ /*
++ * See the comment in fiemap_process_hole as to why
++ * we're doing the locking here.
++ */
++ lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ delalloc = btrfs_find_delalloc_in_range(inode,
+ prev_extent_end,
+ i_size - 1,
+ &delalloc_cached_state,
+ &delalloc_start,
+ &delalloc_end);
++ unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ if (!delalloc)
+ cache.flags |= FIEMAP_EXTENT_LAST;
+ } else {
+@@ -3039,7 +3068,6 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
+ ret = emit_last_fiemap_cache(fieinfo, &cache);
+
+ out_unlock:
+- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
+ out:
+ free_extent_state(delalloc_cached_state);
+--
+2.43.0
+
--- /dev/null
+From 52c36c64b07a83bf913bcac5b4584d01bbfe9f50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Dec 2023 22:23:34 +0900
+Subject: ksmbd: fix wrong allocation size update in smb2_open()
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit a9f106c765c12d2f58aa33431bd8ce8e9d8a404a ]
+
+When a client sends an SMB2_CREATE_ALLOCATION_SIZE create context,
+ksmbd sets the old size in ->AllocationSize of the smb2 create
+response. ksmbd_vfs_getattr() should be called after handling the
+context so that the updated stat result is used.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/smb2pdu.c | 36 ++++++++++++++++++------------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 3e885cdc5ffc7..e8c03445271d0 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -2529,7 +2529,7 @@ static void smb2_new_xattrs(struct ksmbd_tree_connect *tcon, const struct path *
+ da.flags = XATTR_DOSINFO_ATTRIB | XATTR_DOSINFO_CREATE_TIME |
+ XATTR_DOSINFO_ITIME;
+
+- rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, false);
++ rc = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path->mnt), path, &da, true);
+ if (rc)
+ ksmbd_debug(SMB, "failed to store file attribute into xattr\n");
+ }
+@@ -3198,23 +3198,6 @@ int smb2_open(struct ksmbd_work *work)
+ goto err_out;
+ }
+
+- rc = ksmbd_vfs_getattr(&path, &stat);
+- if (rc)
+- goto err_out;
+-
+- if (stat.result_mask & STATX_BTIME)
+- fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+- else
+- fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+- if (req->FileAttributes || fp->f_ci->m_fattr == 0)
+- fp->f_ci->m_fattr =
+- cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
+-
+- if (!created)
+- smb2_update_xattrs(tcon, &path, fp);
+- else
+- smb2_new_xattrs(tcon, &path, fp);
+-
+ if (file_present || created)
+ ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+
+@@ -3315,6 +3298,23 @@ int smb2_open(struct ksmbd_work *work)
+ }
+ }
+
++ rc = ksmbd_vfs_getattr(&path, &stat);
++ if (rc)
++ goto err_out1;
++
++ if (stat.result_mask & STATX_BTIME)
++ fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
++ else
++ fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
++ if (req->FileAttributes || fp->f_ci->m_fattr == 0)
++ fp->f_ci->m_fattr =
++ cpu_to_le32(smb2_get_dos_mode(&stat, le32_to_cpu(req->FileAttributes)));
++
++ if (!created)
++ smb2_update_xattrs(tcon, &path, fp);
++ else
++ smb2_new_xattrs(tcon, &path, fp);
++
+ memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
+
+ rsp->StructureSize = cpu_to_le16(89);
+--
+2.43.0
+
--- /dev/null
+btrfs-fix-deadlock-with-fiemap-and-extent-locking.patch
+ubifs-fix-possible-dereference-after-free.patch
+asoc-cs35l34-fix-gpio-name-and-drop-legacy-include.patch
+ksmbd-fix-wrong-allocation-size-update-in-smb2_open.patch
+ublk-move-ublk_cancel_dev-out-of-ub-mutex.patch
--- /dev/null
+From 4f6f3f612358eb750efbfcb1bf4df2c550826fac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Sep 2023 18:12:22 +0800
+Subject: ubifs: fix possible dereference after free
+
+From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+
+[ Upstream commit d81efd66106c03771ffc8637855a6ec24caa6350 ]
+
+'old_idx' could be dereferenced after being freed, via the
+'rb_link_node()' call.
+
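+In sketch form, the problem path (an abridged reconstruction of
+do_insert_old_idx(); the rb-tree descent is shortened):
+
+	while (*p) {
+		parent = *p;
+		o = rb_entry(parent, struct ubifs_old_idx, rb);
+		if (old_idx->lnum < o->lnum)
+			p = &(*p)->rb_left;
+		else if (old_idx->lnum > o->lnum)
+			p = &(*p)->rb_right;
+		/* ... likewise for ->offs ... */
+		else {
+			ubifs_err(c, "old idx added twice!");
+			kfree(old_idx);
+			return;	/* the fix: don't fall through */
+		}
+	}
+	/* Without the return, execution reaches this point and
+	 * dereferences the freed old_idx: */
+	rb_link_node(&old_idx->rb, parent, p);
+	rb_insert_color(&old_idx->rb, &c->old_idx);
+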
+Fixes: b5fda08ef213 ("ubifs: Fix memleak when insert_old_idx() failed")
+Co-developed-by: Ivanov Mikhail <ivanov.mikhail1@huawei-partners.com>
+Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ubifs/tnc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 6b7d95b65f4b6..f4728e65d1bda 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
++ return;
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+--
+2.43.0
+
--- /dev/null
+From 879b52ffe5c129b48f883a7323fadc31b5ce05da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 17:33:18 +0800
+Subject: ublk: move ublk_cancel_dev() out of ub->mutex
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 85248d670b71d9edda9459ee14fdc85c8e9632c0 ]
+
+ublk_cancel_dev() just calls ublk_cancel_queue() to cancel all pending
+io commands after the ublk request queue is idle. The only protection
+needed is over the read & write of ubq->nr_io_ready and avoiding a
+duplicated command cancel, so add a per-queue lock with a cancel flag
+to provide this protection, and meanwhile move ublk_cancel_dev() out
+of ub->mutex.
+
+Then we needn't call io_uring_cmd_complete_in_task() to cancel pending
+commands, and the same cancel logic will be re-used for cancelable
+uring commands.
+
+This patch basically reverts commit ac5902f84bb5 ("ublk: fix AB-BA lockdep warning").
+
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20231009093324.957829-4-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/ublk_drv.c | 40 +++++++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index 630ddfe6657bc..f4e0573c47114 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -115,6 +115,9 @@ struct ublk_uring_cmd_pdu {
+ */
+ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+
++/* atomic RW with ubq->cancel_lock */
++#define UBLK_IO_FLAG_CANCELED 0x80000000
++
+ struct ublk_io {
+ /* userspace buffer address from io cmd */
+ __u64 addr;
+@@ -139,6 +142,7 @@ struct ublk_queue {
+ bool force_abort;
+ bool timeout;
+ unsigned short nr_io_ready; /* how many ios setup */
++ spinlock_t cancel_lock;
+ struct ublk_device *dev;
+ struct ublk_io ios[];
+ };
+@@ -1477,28 +1481,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+ return ubq->nr_io_ready == ubq->q_depth;
+ }
+
+-static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+-{
+- io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+-}
+-
+ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ {
+ int i;
+
+- if (!ublk_queue_ready(ubq))
+- return;
+-
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+- if (io->flags & UBLK_IO_FLAG_ACTIVE)
+- io_uring_cmd_complete_in_task(io->cmd,
+- ublk_cmd_cancel_cb);
+- }
++ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
++ bool done;
+
+- /* all io commands are canceled */
+- ubq->nr_io_ready = 0;
++ spin_lock(&ubq->cancel_lock);
++ done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
++ if (!done)
++ io->flags |= UBLK_IO_FLAG_CANCELED;
++ spin_unlock(&ubq->cancel_lock);
++
++ if (!done)
++ io_uring_cmd_done(io->cmd,
++ UBLK_IO_RES_ABORT, 0,
++ IO_URING_F_UNLOCKED);
++ }
++ }
+ }
+
+ /* Cancel all pending commands, must be called after del_gendisk() returns */
+@@ -1545,7 +1549,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ublk_wait_tagset_rqs_idle(ub);
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+- ublk_cancel_dev(ub);
+ /* we are going to release task_struct of ubq_daemon and resets
+ * ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
+ * Besides, monitor_work is not necessary in QUIESCED state since we have
+@@ -1568,6 +1571,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
+ __ublk_quiesce_dev(ub);
+ unlock:
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ }
+
+ static void ublk_unquiesce_dev(struct ublk_device *ub)
+@@ -1607,8 +1611,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ put_disk(ub->ub_disk);
+ ub->ub_disk = NULL;
+ unlock:
+- ublk_cancel_dev(ub);
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ cancel_delayed_work_sync(&ub->monitor_work);
+ }
+
+@@ -1962,6 +1966,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
+ void *ptr;
+ int size;
+
++ spin_lock_init(&ubq->cancel_lock);
+ ubq->flags = ub->dev_info.flags;
+ ubq->q_id = q_id;
+ ubq->q_depth = ub->dev_info.queue_depth;
+@@ -2569,8 +2574,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ int i;
+
+ WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
++
+ /* All old ioucmds have to be completed */
+- WARN_ON_ONCE(ubq->nr_io_ready);
++ ubq->nr_io_ready = 0;
+ /* old daemon is PF_EXITING, put it now */
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+--
+2.43.0
+