--- /dev/null
+From 994e5d9b45871c168ff9759f280fc87b3658d067 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Aug 2023 15:34:21 -0500
+Subject: cifs: fix potential oops in cifs_oplock_break
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit e8f5f849ffce24490eb9449e98312b66c0dba76f ]
+
+With deferred close we can have closes that race with lease breaks, so
+with the current checks in oplock_response() for whether to send the
+lease response, an unmount (kill_sb) can occur just before we check
+whether tcon->ses is valid. See below:
+
+[Fri Aug 4 04:12:50 2023] RIP: 0010:cifs_oplock_break+0x1f7/0x5b0 [cifs]
+[Fri Aug 4 04:12:50 2023] Code: 7d a8 48 8b 7d c0 c0 e9 02 48 89 45 b8 41 89 cf e8 3e f5 ff ff 4c 89 f7 41 83 e7 01 e8 82 b3 03 f2 49 8b 45 50 48 85 c0 74 5e <48> 83 78 60 00 74 57 45 84 ff 75 52 48 8b 43 98 48 83 eb 68 48 39
+[Fri Aug 4 04:12:50 2023] RSP: 0018:ffffb30607ddbdf8 EFLAGS: 00010206
+[Fri Aug 4 04:12:50 2023] RAX: 632d223d32612022 RBX: ffff97136944b1e0 RCX: 0000000080100009
+[Fri Aug 4 04:12:50 2023] RDX: 0000000000000001 RSI: 0000000080100009 RDI: ffff97136944b188
+[Fri Aug 4 04:12:50 2023] RBP: ffffb30607ddbe58 R08: 0000000000000001 R09: ffffffffc08e0900
+[Fri Aug 4 04:12:50 2023] R10: 0000000000000001 R11: 000000000000000f R12: ffff97136944b138
+[Fri Aug 4 04:12:50 2023] R13: ffff97149147c000 R14: ffff97136944b188 R15: 0000000000000000
+[Fri Aug 4 04:12:50 2023] FS: 0000000000000000(0000) GS:ffff9714f7c00000(0000) knlGS:0000000000000000
+[Fri Aug 4 04:12:50 2023] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[Fri Aug 4 04:12:50 2023] CR2: 00007fd8de9c7590 CR3: 000000011228e000 CR4: 0000000000350ef0
+[Fri Aug 4 04:12:50 2023] Call Trace:
+[Fri Aug 4 04:12:50 2023] <TASK>
+[Fri Aug 4 04:12:50 2023] process_one_work+0x225/0x3d0
+[Fri Aug 4 04:12:50 2023] worker_thread+0x4d/0x3e0
+[Fri Aug 4 04:12:50 2023] ? process_one_work+0x3d0/0x3d0
+[Fri Aug 4 04:12:50 2023] kthread+0x12a/0x150
+[Fri Aug 4 04:12:50 2023] ? set_kthread_struct+0x50/0x50
+[Fri Aug 4 04:12:50 2023] ret_from_fork+0x22/0x30
+[Fri Aug 4 04:12:50 2023] </TASK>
+
+To fix this, change the ordering of the checks before sending the
+oplock_response so that we first check whether the openFileList is empty.
+
+Fixes: da787d5b7498 ("SMB3: Do not send lease break acknowledgment if all file handles have been closed")
+Suggested-by: Bharath SM <bharathsm@microsoft.com>
+Reviewed-by: Bharath SM <bharathsm@microsoft.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/file.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index d554bca7e07eb..4e9d26d4404ab 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -4878,9 +4878,11 @@ void cifs_oplock_break(struct work_struct *work)
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ struct inode *inode = d_inode(cfile->dentry);
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+- struct TCP_Server_Info *server = tcon->ses->server;
++ struct cifs_tcon *tcon;
++ struct TCP_Server_Info *server;
++ struct tcon_link *tlink;
+ int rc = 0;
+ bool purge_cache = false, oplock_break_cancelled;
+ __u64 persistent_fid, volatile_fid;
+@@ -4889,6 +4891,12 @@ void cifs_oplock_break(struct work_struct *work)
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink))
++ goto out;
++ tcon = tlink_tcon(tlink);
++ server = tcon->ses->server;
++
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
+
+@@ -4938,18 +4946,19 @@ void cifs_oplock_break(struct work_struct *work)
+ /*
+ * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+ * an acknowledgment to be sent when the file has already been closed.
+- * check for server null, since can race with kill_sb calling tree disconnect.
+ */
+ spin_lock(&cinode->open_file_lock);
+- if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+- !list_empty(&cinode->openFileList)) {
++ /* check list empty since can race with kill_sb calling tree disconnect */
++ if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
+ spin_unlock(&cinode->open_file_lock);
+- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+- volatile_fid, net_fid, cinode);
++ rc = server->ops->oplock_response(tcon, persistent_fid,
++ volatile_fid, net_fid, cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ } else
+ spin_unlock(&cinode->open_file_lock);
+
++ cifs_put_tlink(tlink);
++out:
+ cifs_done_oplock_break(cinode);
+ }
+
+--
+2.40.1
+
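A condensed sketch of the resulting guard, pieced together from the hunks
above for readability; this is an illustration of the patch's logic, not
the verbatim upstream function:

	/* Sketch (simplified from the patch above): pin the tcon via a tlink
	 * reference before dereferencing ses->server, and only acknowledge the
	 * lease break while at least one handle is still open. */
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		goto out;			/* racing unmount: nothing to ack */
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	spin_lock(&cinode->open_file_lock);
	if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
		spin_unlock(&cinode->open_file_lock);
		rc = server->ops->oplock_response(tcon, persistent_fid,
						  volatile_fid, net_fid, cinode);
	} else {
		spin_unlock(&cinode->open_file_lock);
	}

	cifs_put_tlink(tlink);
out:
	cifs_done_oplock_break(cinode);
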
--- /dev/null
+From b8fa23ca7a53d8c9609a3d7544850af604c79b93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Aug 2023 12:57:02 +0300
+Subject: regulator: qcom-rpmh: Fix LDO 12 regulator for PM8550
+
+From: Abel Vesa <abel.vesa@linaro.org>
+
+[ Upstream commit 7cdf55462c5533a1c78ae13ab8563558e30e4130 ]
+
+LDO 12 is of the NLDO 515 low-voltage type, so fix its entry accordingly.
+
+Fixes: e6e3776d682d ("regulator: qcom-rpmh: Add support for PM8550 regulators")
+Signed-off-by: Abel Vesa <abel.vesa@linaro.org>
+Reviewed-by: Neil Armstrong <neil.armstrong@linaro.org>
+Link: https://lore.kernel.org/r/20230801095702.2891127-1-abel.vesa@linaro.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/qcom-rpmh-regulator.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index f3b280af07737..cd077b7c4aff3 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1068,7 +1068,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"),
+- RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"),
++ RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo515, "vdd-l12"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"),
+--
+2.40.1
+
ring-buffer-do-not-swap-cpu_buffer-during-resize-pro.patch
btrfs-move-out-now-unused-bg-from-the-reclaim-list.patch
btrfs-fix-use-after-free-of-new-block-group-that-bec.patch
+regulator-qcom-rpmh-fix-ldo-12-regulator-for-pm8550.patch
+virtio-mmio-don-t-break-lifecycle-of-vm_dev.patch
+vduse-use-proper-spinlock-for-irq-injection.patch
+virtio-vdpa-fix-cpumask-memory-leak-in-virtio_vdpa_f.patch
+vdpa-mlx5-fix-mr-initialized-semantics.patch
+vdpa-mlx5-delete-control-vq-iotlb-in-destroy_mr-only.patch
+cifs-fix-potential-oops-in-cifs_oplock_break.patch
--- /dev/null
+From 94808915cfa84891c5bc090de6da7d3027a3c142 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Aug 2023 20:12:20 +0300
+Subject: vdpa/mlx5: Delete control vq iotlb in destroy_mr only when necessary
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eugenio Pérez <eperezma@redhat.com>
+
+[ Upstream commit ad03a0f44cdb97b46e5c84ed353dac9b8ae2c276 ]
+
+mlx5_vdpa_destroy_mr can be called from .set_map with the data ASID after
+the control virtqueue ASID iotlb has been populated. The control vq
+iotlb must not be cleared, since it will not be populated again.
+
+So call the ASID-aware destroy function, which makes sure that the
+right vq resource is destroyed.
+
+Fixes: 8fcd20c30704 ("vdpa/mlx5: Support different address spaces for control and data")
+Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Message-Id: <20230802171231.11001-5-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
+ drivers/vdpa/mlx5/core/mr.c | 2 +-
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 4 ++--
+ 3 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index a0420be5059f4..b53420e874acb 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -122,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
+ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid);
+ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+
+ #define mlx5_vdpa_warn(__dev, format, ...) \
+ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 4ae14a248a4bc..5a1971fcd87b1 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -515,7 +515,7 @@ static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int
+ mr->initialized = false;
+ }
+
+-static void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 279ac6a558d29..f18a9301ab94e 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2564,7 +2564,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ goto err_mr;
+
+ teardown_driver(ndev);
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ if (err)
+ goto err_mr;
+@@ -2580,7 +2580,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ return 0;
+
+ err_setup:
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err_mr:
+ return err;
+ }
+--
+2.40.1
+
--- /dev/null
+From 1c7ac441bfc2cb0ddc25d5cdb3dd0ce606faa9f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Aug 2023 20:12:18 +0300
+Subject: vdpa/mlx5: Fix mr->initialized semantics
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dragos Tatulea <dtatulea@nvidia.com>
+
+[ Upstream commit 9ee811009ad8f87982b69e61d07447d12233ad01 ]
+
+The mr->initialized flag is shared between the control vq and data vq
+parts of the mr init/uninit. But if the control vq and data vq get placed
+in different ASIDs, it can happen that initializing the control vq will
+prevent the data vq mr from being initialized.
+
+This patch consolidates the control and data vq init parts into their
+own init functions. The mr->initialized flag will now be used for the
+data vq only. The control vq currently doesn't need a flag.
+
+The uninitialization part is also taken care of: mlx5_vdpa_destroy_mr was
+split into data and control vq functions, which are now also ASID-aware.
+
+Fixes: 8fcd20c30704 ("vdpa/mlx5: Support different address spaces for control and data")
+Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
+Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Message-Id: <20230802171231.11001-3-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
+ drivers/vdpa/mlx5/core/mr.c | 97 +++++++++++++++++++++---------
+ 2 files changed, 71 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 25fc4120b618d..a0420be5059f4 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -31,6 +31,7 @@ struct mlx5_vdpa_mr {
+ struct list_head head;
+ unsigned long num_directs;
+ unsigned long num_klms;
++ /* state of dvq mr */
+ bool initialized;
+
+ /* serialize mkey creation and destruction */
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 03e5432297912..4ae14a248a4bc 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -489,60 +489,103 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
+ }
+ }
+
+-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return;
++
++ prune_iotlb(mvdev);
++}
++
++static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+- mutex_lock(&mr->mkey_mtx);
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
++ return;
++
+ if (!mr->initialized)
+- goto out;
++ return;
+
+- prune_iotlb(mvdev);
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ mr->initialized = false;
+-out:
++}
++
++static void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ struct mlx5_vdpa_mr *mr = &mvdev->mr;
++
++ mutex_lock(&mr->mkey_mtx);
++
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
++ _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
++
+ mutex_unlock(&mr->mkey_mtx);
+ }
+
+-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+- struct vhost_iotlb *iotlb, unsigned int asid)
++void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++{
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
++}
++
++static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return 0;
++
++ return dup_iotlb(mvdev, iotlb);
++}
++
++static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+- if (mr->initialized)
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ return 0;
+
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- err = create_user_mr(mvdev, iotlb);
+- else
+- err = create_dma_mr(mvdev, mr);
++ if (mr->initialized)
++ return 0;
+
+- if (err)
+- return err;
+- }
++ if (iotlb)
++ err = create_user_mr(mvdev, iotlb);
++ else
++ err = create_dma_mr(mvdev, mr);
+
+- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+- err = dup_iotlb(mvdev, iotlb);
+- if (err)
+- goto out_err;
+- }
++ if (err)
++ return err;
+
+ mr->initialized = true;
++
++ return 0;
++}
++
++static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb, unsigned int asid)
++{
++ int err;
++
++ err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
++ if (err)
++ return err;
++
++ err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
++ if (err)
++ goto out_err;
++
+ return 0;
+
+ out_err:
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- destroy_user_mr(mvdev, mr);
+- else
+- destroy_dma_mr(mvdev, mr);
+- }
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
+
+ return err;
+ }
+--
+2.40.1
+
--- /dev/null
+From 6f4ab4dfa86e69a9ff9f98faa78c4858efdc2fa1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 13:45:05 +0200
+Subject: vduse: Use proper spinlock for IRQ injection
+
+From: Maxime Coquelin <maxime.coquelin@redhat.com>
+
+[ Upstream commit 7ca26efb09a1543fddb29308ea3b63b66cb5d3ee ]
+
+The IRQ injection work used spin_lock_irq() to protect the
+scheduling of the softirq, but spin_lock_bh() should be
+used.
+
+With spin_lock_irq(), we noticed a delay of more than 6
+seconds between the time a NAPI polling work item is
+scheduled and the time it is executed.
+
+Fixes: c8a6153b6c59 ("vduse: Introduce VDUSE - vDPA Device in Userspace")
+Cc: xieyongji@bytedance.com
+
+Suggested-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
+Message-Id: <20230705114505.63274-1-maxime.coquelin@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Xie Yongji <xieyongji@bytedance.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/vdpa_user/vduse_dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 0d84e6a9c3cca..76d4ab451f599 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -935,10 +935,10 @@ static void vduse_dev_irq_inject(struct work_struct *work)
+ {
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+- spin_lock_irq(&dev->irq_lock);
++ spin_lock_bh(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+- spin_unlock_irq(&dev->irq_lock);
++ spin_unlock_bh(&dev->irq_lock);
+ }
+
+ static void vduse_vq_irq_inject(struct work_struct *work)
+@@ -946,10 +946,10 @@ static void vduse_vq_irq_inject(struct work_struct *work)
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+- spin_lock_irq(&vq->irq_lock);
++ spin_lock_bh(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+- spin_unlock_irq(&vq->irq_lock);
++ spin_unlock_bh(&vq->irq_lock);
+ }
+
+ static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
+--
+2.40.1
+
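The general rule behind the fix above: the injection work runs in process
context, and the lock it takes is presumably contended from softirq context
(that part is an inference from the commit message, not stated explicitly),
so disabling bottom halves is enough; masking hard interrupts, as
spin_lock_irq() does, is stricter than needed. A minimal sketch of that
locking pattern, using hypothetical example_* names that are not vduse
symbols:

	/* Illustrative sketch only (hypothetical example_* names): a lock
	 * taken from a workqueue handler that races with softirq context
	 * needs spin_lock_bh(), not spin_lock_irq(). */
	struct example_dev {
		spinlock_t irq_lock;
		void (*callback)(void *priv);
		void *priv;
		struct work_struct inject;
	};

	static void example_inject_work(struct work_struct *work)
	{
		struct example_dev *dev = container_of(work, struct example_dev, inject);

		spin_lock_bh(&dev->irq_lock);	/* blocks the BH-side user; IRQs stay on */
		if (dev->callback)
			dev->callback(dev->priv);
		spin_unlock_bh(&dev->irq_lock);
	}
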
--- /dev/null
+From e6fef3a682df6cf1f82f035eff14ccc18d5e0403 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jun 2023 14:05:26 +0200
+Subject: virtio-mmio: don't break lifecycle of vm_dev
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+[ Upstream commit 55c91fedd03d7b9cf0c5199b2eb12b9b8e95281a ]
+
+vm_dev has a separate lifecycle because it has a 'struct device'
+embedded. Thus, having a release callback for it is correct.
+
+Allocating the vm_dev struct with devres totally breaks this protection,
+though. Instead of waiting for the vm_dev release callback, the memory
+is freed when the platform_device is removed. This results in a
+use-after-free when the callback is finally called.
+
+To easily see the problem, compile the kernel with
+CONFIG_DEBUG_KOBJECT_RELEASE and unbind with sysfs.
+
+The fix is easy: don't use devres in this case.
+
+Found during my research about object lifetime problems.
+
+Fixes: 7eb781b1bbb7 ("virtio_mmio: add cleanup for virtio_mmio_probe")
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Message-Id: <20230629120526.7184-1-wsa+renesas@sang-engineering.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_mmio.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index a46a4a29e9295..97760f6112959 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -607,9 +607,8 @@ static void virtio_mmio_release_dev(struct device *_d)
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+- struct platform_device *pdev = vm_dev->pdev;
+
+- devm_kfree(&pdev->dev, vm_dev);
++ kfree(vm_dev);
+ }
+
+ /* Platform device */
+@@ -620,7 +619,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ unsigned long magic;
+ int rc;
+
+- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
++ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+--
+2.40.1
+
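The underlying rule is worth spelling out: an object that embeds a
refcounted struct device must stay allocated until its ->release() callback
runs, and devres memory cannot satisfy that, because it is freed as soon as
the backing platform device is unbound. A minimal sketch of the pattern the
fix restores, using hypothetical example_* names rather than the
virtio-mmio symbols:

	/* Illustrative sketch only (hypothetical example_* names). */
	struct example_obj {
		struct device dev;	/* refcounted; may outlive probe/remove */
	};

	static void example_release(struct device *d)
	{
		struct example_obj *obj = container_of(d, struct example_obj, dev);

		kfree(obj);		/* freed only when the last reference drops */
	}

	static int example_probe(struct platform_device *pdev)
	{
		struct example_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);	/* not devm_kzalloc() */
		if (!obj)
			return -ENOMEM;
		obj->dev.release = example_release;
		device_initialize(&obj->dev);
		/* ... register and use obj->dev ... */
		return 0;
	}
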
--- /dev/null
+From 8adfa3dcbd345ff83232e802c48a19c66bef9858 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jul 2023 22:10:07 +0300
+Subject: virtio-vdpa: Fix cpumask memory leak in virtio_vdpa_find_vqs()
+
+From: Gal Pressman <gal@nvidia.com>
+
+[ Upstream commit df9557046440b0a62250fee3169a8f6a139f55a6 ]
+
+Free the cpumask allocated by create_affinity_masks() before returning
+from the function.
+
+Fixes: 3dad56823b53 ("virtio-vdpa: Support interrupt affinity spreading mechanism")
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Message-Id: <20230726191036.14324-1-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Xie Yongji <xieyongji@bytedance.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_vdpa.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
+index 989e2d7184ce4..961161da59000 100644
+--- a/drivers/virtio/virtio_vdpa.c
++++ b/drivers/virtio/virtio_vdpa.c
+@@ -393,11 +393,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ cb.callback = virtio_vdpa_config_cb;
+ cb.private = vd_dev;
+ ops->set_config_cb(vdpa, &cb);
++ kfree(masks);
+
+ return 0;
+
+ err_setup_vq:
+ virtio_vdpa_del_vqs(vdev);
++ kfree(masks);
+ return err;
+ }
+
+--
+2.40.1
+