dma-buf: stop mapping sg_tables on attach v2
author		Christian König <christian.koenig@amd.com>
		Tue, 11 Feb 2025 14:26:16 +0000 (15:26 +0100)
committer	Christian König <christian.koenig@amd.com>
		Thu, 6 Mar 2025 14:21:29 +0000 (15:21 +0100)
As a workaround to smoothly transition from static to dynamic DMA-buf
handling, we cached the sg_table on attach whenever dynamic handling
mismatched between exporter and importer.

Since Dmitry and Thomas cleaned that up and also documented the lock
handling, we can drop this workaround now.

V2: implement Sima's comments

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Simona Vetter <simona.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20250211163109.12200-4-christian.koenig@amd.com
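
For illustration (not part of the patch): with the cached-sgt workaround gone,
dma_buf_attach() only creates the attachment; pinning, mapping and the fence
wait for static importers all happen on the first dma_buf_map_attachment()
call, which requires the reservation lock to be held. A minimal importer-side
sketch, assuming a valid dmabuf and dev are supplied by the caller:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/* Sketch only: map a DMA-buf as a static importer after this change. */
static struct sg_table *example_map(struct dma_buf *dmabuf,
				    struct device *dev,
				    struct dma_buf_attachment **out_attach)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* No pinning and no cached sg_table on attach any more. */
	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* map_dma_buf is called with the reservation lock held ... */
	dma_resv_lock(dmabuf->resv, NULL);
	/*
	 * ... and this is now the point where the exporter gets pinned
	 * (when dma_buf_pin_on_map() says so) and where static importers
	 * wait for DMA_RESV_USAGE_KERNEL fences.
	 */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return sgt;
	}

	*out_attach = attach;
	return sgt;
}

Importers that do not already hold the lock can use
dma_buf_map_attachment_unlocked() instead, which takes it internally.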
drivers/dma-buf/dma-buf.c
include/linux/dma-buf.h

diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5baa83b855156516a0a766bee0789b122473efb3..1f7349b08df8be880807b3776b902807417fa036 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -782,7 +782,7 @@ static void mangle_sg_table(struct sg_table *sg_table)
 
        /* To catch abuse of the underlying struct page by importers mix
         * up the bits, but take care to preserve the low SG_ bits to
-        * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+        * not corrupt the sgt. The mixing is undone on unmap
         * before passing the sgt back to the exporter.
         */
        for_each_sgtable_sg(sg_table, sg, i)
@@ -790,29 +790,19 @@ static void mangle_sg_table(struct sg_table *sg_table)
 #endif
 
 }
-static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
-                                      enum dma_data_direction direction)
-{
-       struct sg_table *sg_table;
-       signed long ret;
 
-       sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-       if (IS_ERR_OR_NULL(sg_table))
-               return sg_table;
-
-       if (!dma_buf_attachment_is_dynamic(attach)) {
-               ret = dma_resv_wait_timeout(attach->dmabuf->resv,
-                                           DMA_RESV_USAGE_KERNEL, true,
-                                           MAX_SCHEDULE_TIMEOUT);
-               if (ret < 0) {
-                       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
-                                                          direction);
-                       return ERR_PTR(ret);
-               }
-       }
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+       return !!attach->importer_ops;
+}
 
-       mangle_sg_table(sg_table);
-       return sg_table;
+static bool
+dma_buf_pin_on_map(struct dma_buf_attachment *attach)
+{
+       return attach->dmabuf->ops->pin &&
+               (!dma_buf_attachment_is_dynamic(attach) ||
+                !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
 }
 
 /**
@@ -935,48 +925,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
        list_add(&attach->node, &dmabuf->attachments);
        dma_resv_unlock(dmabuf->resv);
 
-       /* When either the importer or the exporter can't handle dynamic
-        * mappings we cache the mapping here to avoid issues with the
-        * reservation object lock.
-        */
-       if (dma_buf_attachment_is_dynamic(attach) !=
-           dma_buf_is_dynamic(dmabuf)) {
-               struct sg_table *sgt;
-
-               dma_resv_lock(attach->dmabuf->resv, NULL);
-               if (dma_buf_is_dynamic(attach->dmabuf)) {
-                       ret = dmabuf->ops->pin(attach);
-                       if (ret)
-                               goto err_unlock;
-               }
-
-               sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
-               if (!sgt)
-                       sgt = ERR_PTR(-ENOMEM);
-               if (IS_ERR(sgt)) {
-                       ret = PTR_ERR(sgt);
-                       goto err_unpin;
-               }
-               dma_resv_unlock(attach->dmabuf->resv);
-               attach->sgt = sgt;
-               attach->dir = DMA_BIDIRECTIONAL;
-       }
-
        return attach;
 
 err_attach:
        kfree(attach);
        return ERR_PTR(ret);
-
-err_unpin:
-       if (dma_buf_is_dynamic(attach->dmabuf))
-               dmabuf->ops->unpin(attach);
-
-err_unlock:
-       dma_resv_unlock(attach->dmabuf->resv);
-
-       dma_buf_detach(dmabuf, attach);
-       return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
 
@@ -995,16 +948,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
 
-static void __unmap_dma_buf(struct dma_buf_attachment *attach,
-                           struct sg_table *sg_table,
-                           enum dma_data_direction direction)
-{
-       /* uses XOR, hence this unmangles */
-       mangle_sg_table(sg_table);
-
-       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
-}
-
 /**
  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
  * @dmabuf:    [in]    buffer to detach from.
@@ -1022,11 +965,12 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
        dma_resv_lock(dmabuf->resv, NULL);
 
        if (attach->sgt) {
+               mangle_sg_table(attach->sgt);
+               attach->dmabuf->ops->unmap_dma_buf(attach, attach->sgt,
+                                                  attach->dir);
 
-               __unmap_dma_buf(attach, attach->sgt, attach->dir);
-
-               if (dma_buf_is_dynamic(attach->dmabuf))
-                       dmabuf->ops->unpin(attach);
+               if (dma_buf_pin_on_map(attach))
+                       dma_buf_unpin(attach);
        }
        list_del(&attach->node);
 
@@ -1058,7 +1002,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach)
        struct dma_buf *dmabuf = attach->dmabuf;
        int ret = 0;
 
-       WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+       WARN_ON(!attach->importer_ops);
 
        dma_resv_assert_held(dmabuf->resv);
 
@@ -1081,7 +1025,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach)
 {
        struct dma_buf *dmabuf = attach->dmabuf;
 
-       WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+       WARN_ON(!attach->importer_ops);
 
        dma_resv_assert_held(dmabuf->resv);
 
@@ -1115,7 +1059,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
 {
        struct sg_table *sg_table;
-       int r;
+       signed long ret;
 
        might_sleep();
 
@@ -1136,29 +1080,42 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                return attach->sgt;
        }
 
-       if (dma_buf_is_dynamic(attach->dmabuf)) {
-               if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
-                       r = attach->dmabuf->ops->pin(attach);
-                       if (r)
-                               return ERR_PTR(r);
-               }
+       if (dma_buf_pin_on_map(attach)) {
+               ret = attach->dmabuf->ops->pin(attach);
+               /*
+                * Catch exporters making buffers inaccessible even when
+                * attachments preventing that exist.
+                */
+       WARN_ON_ONCE(ret == -EBUSY);
+               if (ret)
+                       return ERR_PTR(ret);
        }
 
-       sg_table = __map_dma_buf(attach, direction);
+       sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);
+       if (IS_ERR(sg_table))
+               goto error_unpin;
 
-       if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
-            !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
-               attach->dmabuf->ops->unpin(attach);
+       /*
+        * Importers with static attachments don't wait for fences.
+        */
+       if (!dma_buf_attachment_is_dynamic(attach)) {
+               ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+                                           DMA_RESV_USAGE_KERNEL, true,
+                                           MAX_SCHEDULE_TIMEOUT);
+               if (ret < 0)
+                       goto error_unmap;
+       }
+       mangle_sg_table(sg_table);
 
-       if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+       if (attach->dmabuf->ops->cache_sgt_mapping) {
                attach->sgt = sg_table;
                attach->dir = direction;
        }
 
 #ifdef CONFIG_DMA_API_DEBUG
-       if (!IS_ERR(sg_table)) {
+       {
                struct scatterlist *sg;
                u64 addr;
                int len;
@@ -1175,6 +1132,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
        }
 #endif /* CONFIG_DMA_API_DEBUG */
        return sg_table;
+
+error_unmap:
+       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+       sg_table = ERR_PTR(ret);
+
+error_unpin:
+       if (dma_buf_pin_on_map(attach))
+               attach->dmabuf->ops->unpin(attach);
+
+       return sg_table;
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
 
@@ -1230,11 +1197,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
        if (attach->sgt == sg_table)
                return;
 
-       __unmap_dma_buf(attach, sg_table, direction);
+       mangle_sg_table(sg_table);
+       attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 
-       if (dma_buf_is_dynamic(attach->dmabuf) &&
-           !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
-               dma_buf_unpin(attach);
+       if (dma_buf_pin_on_map(attach))
+               attach->dmabuf->ops->unpin(attach);
 }
 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
 
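Aside (not part of the patch): with __unmap_dma_buf() gone, the unmap paths
call mangle_sg_table() directly, which works because the mangling is an XOR
and therefore undoes itself on the second application while leaving the low
SG_ bits alone. An illustrative, stand-alone C sketch with made-up values
(the real mask lives in mangle_sg_table() and is outside the hunk shown
above):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* pretend page_link: pointer bits plus low SG_ flag bits */
	const uint64_t orig = 0xffffabcd00001003ULL;
	const uint64_t mask = ~(uint64_t)0xff;	/* keeps the low byte intact */
	uint64_t page_link = orig;

	page_link ^= mask;				/* "mangle" on map */
	assert((page_link & 0x3) == (orig & 0x3));	/* SG_ bits preserved */
	assert(page_link != orig);			/* pointer bits scrambled */

	page_link ^= mask;		/* same XOR on unmap unmangles */
	assert(page_link == orig);
	return 0;
}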
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 36216d28d8bdc01a9c9c47e27c392413f7f6c5fb..c54ff2dda8cbceed537b0e0aabd9305bd071eaaa 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -583,20 +583,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
        return !!dmabuf->ops->pin;
 }
 
-/**
- * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
- * mappings
- * @attach: the DMA-buf attachment to check
- *
- * Returns true if a DMA-buf importer wants to call the map/unmap functions with
- * the dma_resv lock held.
- */
-static inline bool
-dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
-{
-       return !!attach->importer_ops;
-}
-
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev);
 struct dma_buf_attachment *
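
Note (not part of the patch): the public dma_buf_attachment_is_dynamic()
helper is removed from the header above; inside dma-buf.c the same check now
simply tests attach->importer_ops, so whether an attachment is dynamic is
decided purely by how the importer attaches. A hedged sketch, with
example_move_notify() and example_importer_ops as illustrative placeholders:

#include <linux/dma-buf.h>

/* Placeholder: invalidate cached mappings, remap later under dmabuf->resv. */
static void example_move_notify(struct dma_buf_attachment *attach)
{
}

static const struct dma_buf_attach_ops example_importer_ops = {
	.allow_peer2peer = true,
	.move_notify = example_move_notify,
};

static struct dma_buf_attachment *
example_attach(struct dma_buf *dmabuf, struct device *dev, bool dynamic)
{
	if (dynamic)
		/*
		 * importer_ops != NULL: a dynamic attachment, so with
		 * CONFIG_DMABUF_MOVE_NOTIFY enabled dma_buf_pin_on_map()
		 * returns false and map_dma_buf runs unpinned.
		 */
		return dma_buf_dynamic_attach(dmabuf, dev,
					      &example_importer_ops, NULL);

	/*
	 * No importer_ops: a static attachment, so on map the core pins
	 * the exporter (if it implements pin) and waits for kernel fences
	 * on the importer's behalf.
	 */
	return dma_buf_attach(dmabuf, dev);
}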