drm/panthor: Provide a custom dma_buf implementation
author    Boris Brezillon <boris.brezillon@collabora.com>
          Mon, 8 Dec 2025 10:08:28 +0000 (11:08 +0100)
committer Boris Brezillon <boris.brezillon@collabora.com>
          Tue, 9 Dec 2025 12:09:36 +0000 (13:09 +0100)
Before we introduce cached CPU mappings, we want a dma_buf
implementation that satisfies synchronization requests around CPU
accesses to dma_bufs exported by our driver. Let's provide our own
implementation, relying on the default gem_shmem_prime helpers
designed for that purpose.
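
For illustration only (not part of this patch): such synchronization
requests typically reach the begin/end_cpu_access hooks through the
generic DMA_BUF_IOCTL_SYNC uapi. Below is a minimal userspace sketch;
the dma-buf fd and buffer size (dmabuf_fd, size) are assumed to come
from elsewhere, e.g. a PRIME export.

  /*
   * Illustration only: bracket a CPU write to a dma-buf with
   * DMA_BUF_IOCTL_SYNC, which ends up calling the exporter's
   * begin_cpu_access()/end_cpu_access() hooks.
   */
  #include <linux/dma-buf.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>

  static int cpu_fill_dmabuf(int dmabuf_fd, size_t size, unsigned char pattern)
  {
          struct dma_buf_sync sync = { 0 };
          void *map;
          int ret;

          map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
          if (map == MAP_FAILED)
                  return -1;

          /* Tell the exporter a CPU write access starts. */
          sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
          ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
          if (ret)
                  goto out_unmap;

          memset(map, pattern, size);

          /* Signal the end of the access so caches are flushed for the device. */
          sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
          ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);

  out_unmap:
          munmap(map, size);
          return ret;
  }

With the dma_buf_ops table added below, the START ioctl lands in
panthor_gem_prime_begin_cpu_access() and the END ioctl in
panthor_gem_prime_end_cpu_access().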

v5:
- New patch

v6:
- Collect R-b

v7:
- Hand-roll the dma_buf sync/import logic (was previously done by
  generic prime/shmem helpers)

v8:
- No changes

Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patch.msgid.link/20251208100841.730527-2-boris.brezillon@collabora.com
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
drivers/gpu/drm/panthor/panthor_drv.c
drivers/gpu/drm/panthor/panthor_gem.c
drivers/gpu/drm/panthor/panthor_gem.h

drivers/gpu/drm/panthor/panthor_drv.c
index 1cfed4fc35032f21782db7699a5ad3f604593cff..73d26e17e2a23f720541198e62080db1546a5cf7 100644
@@ -1623,6 +1623,7 @@ static const struct drm_driver panthor_drm_driver = {
 
        .gem_create_object = panthor_gem_create_object,
        .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
+       .gem_prime_import = panthor_gem_prime_import,
 #ifdef CONFIG_DEBUG_FS
        .debugfs_init = panthor_debugfs_init,
 #endif
drivers/gpu/drm/panthor/panthor_gem.c
index 7ae07a9bc9967f55c03e18186edd8c61d1a02515..0de37733a2ef3fc5fa14c383b48ae58e34e6bf88 100644
@@ -202,14 +202,130 @@ err_free_bo:
        return ERR_PTR(ret);
 }
 
+static struct sg_table *
+panthor_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+                             enum dma_data_direction dir)
+{
+       struct sg_table *sgt = drm_gem_map_dma_buf(attach, dir);
+
+       if (!IS_ERR(sgt))
+               attach->priv = sgt;
+
+       return sgt;
+}
+
+static void
+panthor_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       attach->priv = NULL;
+       drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
+static int
+panthor_gem_prime_begin_cpu_access(struct dma_buf *dma_buf,
+                                  enum dma_data_direction dir)
+{
+       struct drm_gem_object *obj = dma_buf->priv;
+       struct drm_device *dev = obj->dev;
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       struct dma_buf_attachment *attach;
+
+       dma_resv_lock(obj->resv, NULL);
+       if (shmem->sgt)
+               dma_sync_sgtable_for_cpu(dev->dev, shmem->sgt, dir);
+
+       if (shmem->vaddr)
+               invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+       list_for_each_entry(attach, &dma_buf->attachments, node) {
+               struct sg_table *sgt = attach->priv;
+
+               if (sgt)
+                       dma_sync_sgtable_for_cpu(attach->dev, sgt, dir);
+       }
+       dma_resv_unlock(obj->resv);
+
+       return 0;
+}
+
+static int
+panthor_gem_prime_end_cpu_access(struct dma_buf *dma_buf,
+                                enum dma_data_direction dir)
+{
+       struct drm_gem_object *obj = dma_buf->priv;
+       struct drm_device *dev = obj->dev;
+       struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+       struct dma_buf_attachment *attach;
+
+       dma_resv_lock(obj->resv, NULL);
+       list_for_each_entry(attach, &dma_buf->attachments, node) {
+               struct sg_table *sgt = attach->priv;
+
+               if (sgt)
+                       dma_sync_sgtable_for_device(attach->dev, sgt, dir);
+       }
+
+       if (shmem->vaddr)
+               flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+       if (shmem->sgt)
+               dma_sync_sgtable_for_device(dev->dev, shmem->sgt, dir);
+
+       dma_resv_unlock(obj->resv);
+       return 0;
+}
+
+static const struct dma_buf_ops panthor_dma_buf_ops = {
+       .attach = drm_gem_map_attach,
+       .detach = drm_gem_map_detach,
+       .map_dma_buf = panthor_gem_prime_map_dma_buf,
+       .unmap_dma_buf = panthor_gem_prime_unmap_dma_buf,
+       .release = drm_gem_dmabuf_release,
+       .mmap = drm_gem_dmabuf_mmap,
+       .vmap = drm_gem_dmabuf_vmap,
+       .vunmap = drm_gem_dmabuf_vunmap,
+       .begin_cpu_access = panthor_gem_prime_begin_cpu_access,
+       .end_cpu_access = panthor_gem_prime_end_cpu_access,
+};
+
 static struct dma_buf *
 panthor_gem_prime_export(struct drm_gem_object *obj, int flags)
 {
+       struct drm_device *dev = obj->dev;
+       struct dma_buf_export_info exp_info = {
+               .exp_name = KBUILD_MODNAME,
+               .owner = THIS_MODULE,
+               .ops = &panthor_dma_buf_ops,
+               .size = obj->size,
+               .flags = flags,
+               .priv = obj,
+               .resv = obj->resv,
+       };
+
        /* We can't export GEMs that have an exclusive VM. */
        if (to_panthor_bo(obj)->exclusive_vm_root_gem)
                return ERR_PTR(-EINVAL);
 
-       return drm_gem_prime_export(obj, flags);
+       return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
+struct drm_gem_object *
+panthor_gem_prime_import(struct drm_device *dev,
+                        struct dma_buf *dma_buf)
+{
+       struct drm_gem_object *obj = dma_buf->priv;
+
+       if (dma_buf->ops == &panthor_dma_buf_ops && obj->dev == dev) {
+               /* Importing dmabuf exported from our own gem increases
+                * refcount on gem itself instead of f_count of dmabuf.
+                */
+               drm_gem_object_get(obj);
+               return obj;
+       }
+
+       return drm_gem_prime_import(dev, dma_buf);
 }
 
 static enum drm_gem_object_status panthor_gem_status(struct drm_gem_object *obj)
drivers/gpu/drm/panthor/panthor_gem.h
index 2eefe9104e5e7dadb99e0734d0e351a700d6b610..262c77a4d3c1f7b9fe3fc92af59be31c329b6435 100644
@@ -149,6 +149,10 @@ panthor_gem_create_with_handle(struct drm_file *file,
 void panthor_gem_bo_set_label(struct drm_gem_object *obj, const char *label);
 void panthor_gem_kernel_bo_set_label(struct panthor_kernel_bo *bo, const char *label);
 
+struct drm_gem_object *
+panthor_gem_prime_import(struct drm_device *dev,
+                        struct dma_buf *dma_buf);
+
 static inline u64
 panthor_kernel_bo_gpuva(struct panthor_kernel_bo *bo)
 {