struct phys_vec *phys_vec;
struct p2pdma_provider *provider;
u32 nr_ranges;
+ struct kref kref;
+ struct completion comp;
u8 revoked : 1;
};
return 0;
}
+/*
+ * kref release callback for the per-dmabuf mapping refcount: fires when
+ * the last outstanding mapping reference is dropped and wakes the thread
+ * blocked in wait_for_completion() on the revoke/teardown path.
+ */
+static void vfio_pci_dma_buf_done(struct kref *kref)
+{
+ struct vfio_pci_dma_buf *priv =
+ container_of(kref, struct vfio_pci_dma_buf, kref);
+
+ complete(&priv->comp);
+}
+
static struct sg_table *
vfio_pci_dma_buf_map(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct vfio_pci_dma_buf *priv = attachment->dmabuf->priv;
+ struct sg_table *ret;
dma_resv_assert_held(priv->dmabuf->resv);
if (priv->revoked)
return ERR_PTR(-ENODEV);
- return dma_buf_phys_vec_to_sgt(attachment, priv->provider,
- priv->phys_vec, priv->nr_ranges,
- priv->size, dir);
+ ret = dma_buf_phys_vec_to_sgt(attachment, priv->provider,
+ priv->phys_vec, priv->nr_ranges,
+ priv->size, dir);
+ if (IS_ERR(ret))
+ return ret;
+
+ /*
+ * Each successful map pins one reference; the matching kref_put()
+ * is in vfio_pci_dma_buf_unmap(). The revoke path drops its own
+ * initial reference and then waits on priv->comp until all mapped
+ * users have unmapped. Taken only on success so a failed map never
+ * leaks a reference.
+ */
+ kref_get(&priv->kref);
+ return ret;
}
static void vfio_pci_dma_buf_unmap(struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction dir)
{
+ struct vfio_pci_dma_buf *priv = attachment->dmabuf->priv;
+
+ /* Unmap must run under the reservation lock, same as map. */
+ dma_resv_assert_held(priv->dmabuf->resv);
+
dma_buf_free_sgt(attachment, sgt, dir);
+ /*
+ * Drop the mapping reference taken in vfio_pci_dma_buf_map();
+ * the final put completes priv->comp for a waiting revoker.
+ */
+ kref_put(&priv->kref, vfio_pci_dma_buf_done);
}
static void vfio_pci_dma_buf_release(struct dma_buf *dmabuf)
goto err_dev_put;
}
+ kref_init(&priv->kref);
+ init_completion(&priv->comp);
+
/* dma_buf_put() now frees priv */
INIT_LIST_HEAD(&priv->dmabufs_elm);
down_write(&vdev->memory_lock);
if (priv->revoked != revoked) {
dma_resv_lock(priv->dmabuf->resv, NULL);
- priv->revoked = revoked;
+ if (revoked)
+ priv->revoked = true;
dma_buf_invalidate_mappings(priv->dmabuf);
+ dma_resv_wait_timeout(priv->dmabuf->resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(priv->dmabuf->resv);
+ if (revoked) {
+ kref_put(&priv->kref, vfio_pci_dma_buf_done);
+ wait_for_completion(&priv->comp);
+ } else {
+ /*
+ * Kref is initialize again, because when revoke
+ * was performed the reference counter was decreased
+ * to zero to trigger completion.
+ */
+ kref_init(&priv->kref);
+ /*
+ * There is no need to wait as no mapping was
+ * performed when the previous status was
+ * priv->revoked == true.
+ */
+ reinit_completion(&priv->comp);
+ dma_resv_lock(priv->dmabuf->resv, NULL);
+ priv->revoked = false;
+ dma_resv_unlock(priv->dmabuf->resv);
+ }
}
fput(priv->dmabuf->file);
}
priv->vdev = NULL;
priv->revoked = true;
dma_buf_invalidate_mappings(priv->dmabuf);
+ dma_resv_wait_timeout(priv->dmabuf->resv,
+ DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
dma_resv_unlock(priv->dmabuf->resv);
+ kref_put(&priv->kref, vfio_pci_dma_buf_done);
+ wait_for_completion(&priv->comp);
vfio_device_put_registration(&vdev->vdev);
fput(priv->dmabuf->file);
}