--- /dev/null
+From a8fcbdc3a9339d48f22505ab00644ba7fd6a574f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Aug 2023 15:34:21 -0500
+Subject: cifs: fix potential oops in cifs_oplock_break
+
+From: Steve French <stfrench@microsoft.com>
+
+[ Upstream commit e8f5f849ffce24490eb9449e98312b66c0dba76f ]
+
+With deferred close, file closes can race with lease breaks, and with
+the current checks for whether to send the lease response via
+oplock_response(), an unmount (kill_sb) can occur just before we check
+whether tcon->ses is still valid. See below:
+
+[Fri Aug 4 04:12:50 2023] RIP: 0010:cifs_oplock_break+0x1f7/0x5b0 [cifs]
+[Fri Aug 4 04:12:50 2023] Code: 7d a8 48 8b 7d c0 c0 e9 02 48 89 45 b8 41 89 cf e8 3e f5 ff ff 4c 89 f7 41 83 e7 01 e8 82 b3 03 f2 49 8b 45 50 48 85 c0 74 5e <48> 83 78 60 00 74 57 45 84 ff 75 52 48 8b 43 98 48 83 eb 68 48 39
+[Fri Aug 4 04:12:50 2023] RSP: 0018:ffffb30607ddbdf8 EFLAGS: 00010206
+[Fri Aug 4 04:12:50 2023] RAX: 632d223d32612022 RBX: ffff97136944b1e0 RCX: 0000000080100009
+[Fri Aug 4 04:12:50 2023] RDX: 0000000000000001 RSI: 0000000080100009 RDI: ffff97136944b188
+[Fri Aug 4 04:12:50 2023] RBP: ffffb30607ddbe58 R08: 0000000000000001 R09: ffffffffc08e0900
+[Fri Aug 4 04:12:50 2023] R10: 0000000000000001 R11: 000000000000000f R12: ffff97136944b138
+[Fri Aug 4 04:12:50 2023] R13: ffff97149147c000 R14: ffff97136944b188 R15: 0000000000000000
+[Fri Aug 4 04:12:50 2023] FS: 0000000000000000(0000) GS:ffff9714f7c00000(0000) knlGS:0000000000000000
+[Fri Aug 4 04:12:50 2023] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[Fri Aug 4 04:12:50 2023] CR2: 00007fd8de9c7590 CR3: 000000011228e000 CR4: 0000000000350ef0
+[Fri Aug 4 04:12:50 2023] Call Trace:
+[Fri Aug 4 04:12:50 2023] <TASK>
+[Fri Aug 4 04:12:50 2023] process_one_work+0x225/0x3d0
+[Fri Aug 4 04:12:50 2023] worker_thread+0x4d/0x3e0
+[Fri Aug 4 04:12:50 2023] ? process_one_work+0x3d0/0x3d0
+[Fri Aug 4 04:12:50 2023] kthread+0x12a/0x150
+[Fri Aug 4 04:12:50 2023] ? set_kthread_struct+0x50/0x50
+[Fri Aug 4 04:12:50 2023] ret_from_fork+0x22/0x30
+[Fri Aug 4 04:12:50 2023] </TASK>
+
+To fix this, change the ordering of the checks made before sending the
+oplock_response so that we first check whether the inode's openFileList
+is empty.
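+
+Sketched roughly (the actual hunks are below), the fix pins the tree
+connection through the superblock before touching any session state:
+
+	tlink = cifs_sb_tlink(cifs_sb);	/* takes a tlink reference */
+	if (IS_ERR(tlink))
+		goto out;		/* no valid tlink, e.g. sb teardown */
+	tcon = tlink_tcon(tlink);
+	server = tcon->ses->server;	/* safe while the tlink is held */
+	/* ... downgrade oplock, send oplock_response() if needed ... */
+	cifs_put_tlink(tlink);		/* drop the reference when done */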
+
+Fixes: da787d5b7498 ("SMB3: Do not send lease break acknowledgment if all file handles have been closed")
+Suggested-by: Bharath SM <bharathsm@microsoft.com>
+Reviewed-by: Bharath SM <bharathsm@microsoft.com>
+Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
+Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/file.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 27c6d14e369f1..0a8adec515aed 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -5082,9 +5082,11 @@ void cifs_oplock_break(struct work_struct *work)
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ struct inode *inode = d_inode(cfile->dentry);
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+- struct TCP_Server_Info *server = tcon->ses->server;
++ struct cifs_tcon *tcon;
++ struct TCP_Server_Info *server;
++ struct tcon_link *tlink;
+ int rc = 0;
+ bool purge_cache = false, oplock_break_cancelled;
+ __u64 persistent_fid, volatile_fid;
+@@ -5093,6 +5095,12 @@ void cifs_oplock_break(struct work_struct *work)
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink))
++ goto out;
++ tcon = tlink_tcon(tlink);
++ server = tcon->ses->server;
++
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
+
+@@ -5142,18 +5150,19 @@ void cifs_oplock_break(struct work_struct *work)
+ /*
+ * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+ * an acknowledgment to be sent when the file has already been closed.
+- * check for server null, since can race with kill_sb calling tree disconnect.
+ */
+ spin_lock(&cinode->open_file_lock);
+- if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+- !list_empty(&cinode->openFileList)) {
++ /* check list empty since can race with kill_sb calling tree disconnect */
++ if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
+ spin_unlock(&cinode->open_file_lock);
+- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+- volatile_fid, net_fid, cinode);
++ rc = server->ops->oplock_response(tcon, persistent_fid,
++ volatile_fid, net_fid, cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ } else
+ spin_unlock(&cinode->open_file_lock);
+
++ cifs_put_tlink(tlink);
++out:
+ cifs_done_oplock_break(cinode);
+ }
+
+--
+2.40.1
+
btrfs-convert-btrfs_block_group-needs_free_space-to-.patch
btrfs-convert-btrfs_block_group-seq_zone-to-runtime-.patch
btrfs-fix-use-after-free-of-new-block-group-that-bec.patch
+virtio-mmio-don-t-break-lifecycle-of-vm_dev.patch
+vduse-use-proper-spinlock-for-irq-injection.patch
+vdpa-mlx5-fix-mr-initialized-semantics.patch
+vdpa-mlx5-delete-control-vq-iotlb-in-destroy_mr-only.patch
+cifs-fix-potential-oops-in-cifs_oplock_break.patch
--- /dev/null
+From 4087e98a801a56127579efae26fdfec84b865275 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Aug 2023 20:12:20 +0300
+Subject: vdpa/mlx5: Delete control vq iotlb in destroy_mr only when necessary
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eugenio Pérez <eperezma@redhat.com>
+
+[ Upstream commit ad03a0f44cdb97b46e5c84ed353dac9b8ae2c276 ]
+
+mlx5_vdpa_destroy_mr can be called from .set_map with the data ASID
+after the control virtqueue ASID iotlb has been populated. The control
+vq iotlb must not be cleared, since it will not be populated again.
+
+So call the ASID-aware destroy function, which makes sure that only
+the right vq resources are destroyed.
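+
+For reference, the ASID-aware helper (added by the companion
+"vdpa/mlx5: Fix mr->initialized semantics" patch in this series) only
+tears down the resources whose group maps to the given ASID, roughly:
+
+	mutex_lock(&mr->mkey_mtx);
+	_mlx5_vdpa_destroy_dvq_mr(mvdev, asid);	/* data vq mr, skipped unless
+						   the DATAVQ group uses asid */
+	_mlx5_vdpa_destroy_cvq_mr(mvdev, asid);	/* cvq iotlb, skipped unless
+						   the CVQ group uses asid */
+	mutex_unlock(&mr->mkey_mtx);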
+
+Fixes: 8fcd20c30704 ("vdpa/mlx5: Support different address spaces for control and data")
+Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Message-Id: <20230802171231.11001-5-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
+ drivers/vdpa/mlx5/core/mr.c | 2 +-
+ drivers/vdpa/mlx5/net/mlx5_vnet.c | 4 ++--
+ 3 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index a0420be5059f4..b53420e874acb 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -122,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
+ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid);
+ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+
+ #define mlx5_vdpa_warn(__dev, format, ...) \
+ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index b3609867d5676..113aac0446de5 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -518,7 +518,7 @@ static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int
+ mr->initialized = false;
+ }
+
+-static void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index daac3ab314785..bf99654371b35 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2406,7 +2406,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ goto err_mr;
+
+ teardown_driver(ndev);
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ if (err)
+ goto err_mr;
+@@ -2422,7 +2422,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ return 0;
+
+ err_setup:
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err_mr:
+ return err;
+ }
+--
+2.40.1
+
--- /dev/null
+From c3080b0e026b0ddc501f2468ae644acb4829735a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Aug 2023 20:12:18 +0300
+Subject: vdpa/mlx5: Fix mr->initialized semantics
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dragos Tatulea <dtatulea@nvidia.com>
+
+[ Upstream commit 9ee811009ad8f87982b69e61d07447d12233ad01 ]
+
+The mr->initialized flag is shared between the control vq and data vq
+parts of the mr init/uninit. But if the control vq and data vq get placed
+in different ASIDs, it can happen that initializing the control vq will
+prevent the data vq mr from being initialized.
+
+This patch consolidates the control and data vq init parts into their
+own init functions. The mr->initialized will now be used for the data vq
+only. The control vq currently doesn't need a flag.
+
+The uninit path is also taken care of: mlx5_vdpa_destroy_mr was split
+into data and control vq functions, which are now also ASID aware.
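+
+To make the failure mode concrete (an illustrative trace; the ASID
+numbers are just an example), with the data vqs in ASID 0 and the
+control vq in ASID 1 the old code could do:
+
+	_mlx5_vdpa_create_mr(mvdev, iotlb, 1);	/* cvq: dup_iotlb(), then
+						   mr->initialized = true */
+	_mlx5_vdpa_create_mr(mvdev, iotlb, 0);	/* dvq: sees mr->initialized,
+						   returns early, so the data
+						   vq mr is never created */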
+
+Fixes: 8fcd20c30704 ("vdpa/mlx5: Support different address spaces for control and data")
+Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
+Reviewed-by: Eugenio Pérez <eperezma@redhat.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Message-Id: <20230802171231.11001-3-dtatulea@nvidia.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/mlx5/core/mlx5_vdpa.h | 1 +
+ drivers/vdpa/mlx5/core/mr.c | 97 +++++++++++++++++++++---------
+ 2 files changed, 71 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 25fc4120b618d..a0420be5059f4 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -31,6 +31,7 @@ struct mlx5_vdpa_mr {
+ struct list_head head;
+ unsigned long num_directs;
+ unsigned long num_klms;
++ /* state of dvq mr */
+ bool initialized;
+
+ /* serialize mkey creation and destruction */
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index a4d7ee2339fa5..b3609867d5676 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -491,15 +491,24 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
+ }
+ }
+
+-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return;
++
++ prune_iotlb(mvdev);
++}
++
++static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+- mutex_lock(&mr->mkey_mtx);
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
++ return;
++
+ if (!mr->initialized)
+- goto out;
++ return;
+
+- prune_iotlb(mvdev);
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+@@ -507,45 +516,79 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+
+ memset(mr, 0, sizeof(*mr));
+ mr->initialized = false;
+-out:
++}
++
++static void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ struct mlx5_vdpa_mr *mr = &mvdev->mr;
++
++ mutex_lock(&mr->mkey_mtx);
++
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
++ _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
++
+ mutex_unlock(&mr->mkey_mtx);
+ }
+
+-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+- struct vhost_iotlb *iotlb, unsigned int asid)
++void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++{
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
++}
++
++static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return 0;
++
++ return dup_iotlb(mvdev, iotlb);
++}
++
++static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+- if (mr->initialized)
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ return 0;
+
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- err = create_user_mr(mvdev, iotlb);
+- else
+- err = create_dma_mr(mvdev, mr);
++ if (mr->initialized)
++ return 0;
+
+- if (err)
+- return err;
+- }
++ if (iotlb)
++ err = create_user_mr(mvdev, iotlb);
++ else
++ err = create_dma_mr(mvdev, mr);
+
+- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+- err = dup_iotlb(mvdev, iotlb);
+- if (err)
+- goto out_err;
+- }
++ if (err)
++ return err;
+
+ mr->initialized = true;
++
++ return 0;
++}
++
++static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb, unsigned int asid)
++{
++ int err;
++
++ err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
++ if (err)
++ return err;
++
++ err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
++ if (err)
++ goto out_err;
++
+ return 0;
+
+ out_err:
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- destroy_user_mr(mvdev, mr);
+- else
+- destroy_dma_mr(mvdev, mr);
+- }
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
+
+ return err;
+ }
+--
+2.40.1
+
--- /dev/null
+From cd8e45e756c1718bfbb8842763312133ef03620b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Jul 2023 13:45:05 +0200
+Subject: vduse: Use proper spinlock for IRQ injection
+
+From: Maxime Coquelin <maxime.coquelin@redhat.com>
+
+[ Upstream commit 7ca26efb09a1543fddb29308ea3b63b66cb5d3ee ]
+
+The IRQ injection work used spin_lock_irq() to protect the
+scheduling of the softirq, but spin_lock_bh() should be
+used.
+
+With spin_lock_irq(), we noticed delays of more than 6
+seconds between the time a NAPI polling work item is
+scheduled and the time it is executed.
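+
+Annotated for clarity (the comments are editorial; the hunk below is
+the actual change), the fixed injection path becomes:
+
+	spin_lock_bh(&vq->irq_lock);		/* work item, process context */
+	if (vq->ready && vq->cb.callback)
+		vq->cb.callback(vq->cb.private); /* may kick NAPI (softirq) */
+	spin_unlock_bh(&vq->irq_lock);		/* any softirq raised by the
+						   callback runs here instead
+						   of being deferred */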
+
+Fixes: c8a6153b6c59 ("vduse: Introduce VDUSE - vDPA Device in Userspace")
+Cc: xieyongji@bytedance.com
+
+Suggested-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
+Message-Id: <20230705114505.63274-1-maxime.coquelin@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Xie Yongji <xieyongji@bytedance.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vdpa/vdpa_user/vduse_dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 72f924ec4658d..edcd74cc4c0f7 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -899,10 +899,10 @@ static void vduse_dev_irq_inject(struct work_struct *work)
+ {
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+- spin_lock_irq(&dev->irq_lock);
++ spin_lock_bh(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+- spin_unlock_irq(&dev->irq_lock);
++ spin_unlock_bh(&dev->irq_lock);
+ }
+
+ static void vduse_vq_irq_inject(struct work_struct *work)
+@@ -910,10 +910,10 @@ static void vduse_vq_irq_inject(struct work_struct *work)
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+- spin_lock_irq(&vq->irq_lock);
++ spin_lock_bh(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+- spin_unlock_irq(&vq->irq_lock);
++ spin_unlock_bh(&vq->irq_lock);
+ }
+
+ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+--
+2.40.1
+
--- /dev/null
+From dbc982795046608e97691718339224cae5470aed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jun 2023 14:05:26 +0200
+Subject: virtio-mmio: don't break lifecycle of vm_dev
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+[ Upstream commit 55c91fedd03d7b9cf0c5199b2eb12b9b8e95281a ]
+
+vm_dev has a separate lifecycle because it has a 'struct device'
+embedded. Thus, having a release callback for it is correct.
+
+Allocating the vm_dev struct with devres totally breaks this protection,
+though. Instead of waiting for the vm_dev release callback, the memory
+is freed when the platform_device is removed, resulting in a
+use-after-free when the callback is finally called.
+
+To easily see the problem, compile the kernel with
+CONFIG_DEBUG_KOBJECT_RELEASE and unbind with sysfs.
+
+The fix is easy: don't use devres in this case.
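+
+Sketched with the driver's own functions (comments are editorial), the
+resulting lifecycle is:
+
+	/* probe: plain allocation, owned by the embedded struct device */
+	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);	/* not devm_kzalloc() */
+
+	/* runs only when the last reference to vdev->dev is dropped */
+	static void virtio_mmio_release_dev(struct device *_d)
+	{
+		struct virtio_device *vdev =
+			container_of(_d, struct virtio_device, dev);
+
+		kfree(to_virtio_mmio_device(vdev));
+	}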
+
+Found during my research about object lifetime problems.
+
+Fixes: 7eb781b1bbb7 ("virtio_mmio: add cleanup for virtio_mmio_probe")
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Message-Id: <20230629120526.7184-1-wsa+renesas@sang-engineering.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_mmio.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index 3ff746e3f24aa..dec3cba884586 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -590,9 +590,8 @@ static void virtio_mmio_release_dev(struct device *_d)
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+- struct platform_device *pdev = vm_dev->pdev;
+
+- devm_kfree(&pdev->dev, vm_dev);
++ kfree(vm_dev);
+ }
+
+ /* Platform device */
+@@ -603,7 +602,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ unsigned long magic;
+ int rc;
+
+- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
++ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+--
+2.40.1
+