git.ipfire.org Git - thirdparty/linux.git/commitdiff
dma-buf: drop caching of sg_tables
author: Christian König <christian.koenig@amd.com>
Tue, 11 Feb 2025 16:20:53 +0000 (17:20 +0100)
committer: Christian König <christian.koenig@amd.com>
Thu, 6 Mar 2025 14:21:36 +0000 (15:21 +0100)
That was purely for the transition from static to dynamic dma-buf
handling and can be removed again now.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Simona Vetter <simona.vetter@ffwll.ch>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250211163109.12200-5-christian.koenig@amd.com
drivers/dma-buf/dma-buf.c
drivers/dma-buf/udmabuf.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/virtio/virtgpu_prime.c
include/linux/dma-buf.h

index 1f7349b08df8be880807b3776b902807417fa036..0c48d41dd5eb66895e73331e3f643ef2c3989ecd 100644 (file)
@@ -636,10 +636,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
                    || !exp_info->ops->release))
                return ERR_PTR(-EINVAL);
 
-       if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
-                   (exp_info->ops->pin || exp_info->ops->unpin)))
-               return ERR_PTR(-EINVAL);
-
        if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
                return ERR_PTR(-EINVAL);
 
@@ -963,17 +959,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
                return;
 
        dma_resv_lock(dmabuf->resv, NULL);
-
-       if (attach->sgt) {
-               mangle_sg_table(attach->sgt);
-               attach->dmabuf->ops->unmap_dma_buf(attach, attach->sgt,
-                                                  attach->dir);
-
-               if (dma_buf_pin_on_map(attach))
-                       dma_buf_unpin(attach);
-       }
        list_del(&attach->node);
-
        dma_resv_unlock(dmabuf->resv);
 
        if (dmabuf->ops->detach)
@@ -1068,18 +1054,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
        dma_resv_assert_held(attach->dmabuf->resv);
 
-       if (attach->sgt) {
-               /*
-                * Two mappings with different directions for the same
-                * attachment are not allowed.
-                */
-               if (attach->dir != direction &&
-                   attach->dir != DMA_BIDIRECTIONAL)
-                       return ERR_PTR(-EBUSY);
-
-               return attach->sgt;
-       }
-
        if (dma_buf_pin_on_map(attach)) {
                ret = attach->dmabuf->ops->pin(attach);
                /*
@@ -1109,11 +1083,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
        }
        mangle_sg_table(sg_table);
 
-       if (attach->dmabuf->ops->cache_sgt_mapping) {
-               attach->sgt = sg_table;
-               attach->dir = direction;
-       }
-
 #ifdef CONFIG_DMA_API_DEBUG
        {
                struct scatterlist *sg;
@@ -1194,9 +1163,6 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 
        dma_resv_assert_held(attach->dmabuf->resv);
 
-       if (attach->sgt == sg_table)
-               return;
-
        mangle_sg_table(sg_table);
        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 
index cc7398cc17d67fca0634e763534901f8e6b454f8..2fa2c9135eac3580dd4701834a7f1e9560736318 100644 (file)
@@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
 }
 
 static const struct dma_buf_ops udmabuf_ops = {
-       .cache_sgt_mapping = true,
        .map_dma_buf       = map_udmabuf,
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
index bdb51c8f262e7dfb2ee6326be4c5367155834260..a3d64f93a225944a760cd618c362b82031edfb56 100644 (file)
@@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
-       .cache_sgt_mapping = true,
        .attach = drm_gem_map_attach,
        .detach = drm_gem_map_detach,
        .map_dma_buf = drm_gem_map_dma_buf,
index fe6a0b018571751f85d45b19997efb3cf6368176..c6f3be3cb914cd458f29f50927def2768185c58e 100644 (file)
@@ -75,7 +75,6 @@ static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 
 static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
        .ops = {
-               .cache_sgt_mapping = true,
                .attach = virtio_dma_buf_attach,
                .detach = drm_gem_map_detach,
                .map_dma_buf = virtgpu_gem_map_dma_buf,
index c54ff2dda8cbceed537b0e0aabd9305bd071eaaa..544f8f8c3f447fa183165a41e5b86f7cc836ee64 100644 (file)
@@ -34,15 +34,6 @@ struct dma_buf_attachment;
  * @vunmap: [optional] unmaps a vmap from the buffer
  */
 struct dma_buf_ops {
-       /**
-         * @cache_sgt_mapping:
-         *
-         * If true the framework will cache the first mapping made for each
-         * attachment. This avoids creating mappings for attachments multiple
-         * times.
-         */
-       bool cache_sgt_mapping;
-
        /**
         * @attach:
         *
@@ -493,8 +484,6 @@ struct dma_buf_attach_ops {
  * @dmabuf: buffer for this attachment.
  * @dev: device attached to the buffer.
  * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
- * @sgt: cached mapping.
- * @dir: direction of cached mapping.
  * @peer2peer: true if the importer can handle peer resources without pages.
  * @priv: exporter specific attachment data.
  * @importer_ops: importer operations for this attachment, if provided
@@ -514,8 +503,6 @@ struct dma_buf_attachment {
        struct dma_buf *dmabuf;
        struct device *dev;
        struct list_head node;
-       struct sg_table *sgt;
-       enum dma_data_direction dir;
        bool peer2peer;
        const struct dma_buf_attach_ops *importer_ops;
        void *importer_priv;