	return 0;
}
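+
+/*
+ * Cache the sg_table created by the generic PRIME helper on the attachment
+ * so the begin/end_cpu_access hooks below can sync each importer's mapping.
+ */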
+static struct sg_table *
+panfrost_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+			       enum dma_data_direction dir)
+{
+	struct sg_table *sgt = drm_gem_map_dma_buf(attach, dir);
+
+	if (!IS_ERR(sgt))
+		attach->priv = sgt;
+
+	return sgt;
+}
+
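+/*
+ * Forget the cached sg_table before the generic helper unmaps and frees it.
+ */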
+static void
+panfrost_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+				 struct sg_table *sgt,
+				 enum dma_data_direction dir)
+{
+	attach->priv = NULL;
+	drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
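+/*
+ * Make the buffer coherent for a CPU access: sync the exporter's backing
+ * pages and kernel vmap for the CPU, then every importer's cached mapping.
+ * Holding the dma-buf reservation lock protects the attachment list walk.
+ */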
+static int
+panfrost_gem_prime_begin_cpu_access(struct dma_buf *dma_buf,
+				    enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct dma_buf_attachment *attach;
+
+	dma_resv_lock(obj->resv, NULL);
+	if (shmem->sgt)
+		dma_sync_sgtable_for_cpu(dev->dev, shmem->sgt, dir);
+
+	if (shmem->vaddr)
+		invalidate_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+	list_for_each_entry(attach, &dma_buf->attachments, node) {
+		struct sg_table *sgt = attach->priv;
+
+		if (sgt)
+			dma_sync_sgtable_for_cpu(attach->dev, sgt, dir);
+	}
+	dma_resv_unlock(obj->resv);
+
+	return 0;
+}
+
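+/*
+ * Flush CPU writes back out after a CPU access: sync every importer's
+ * mapping and the exporter's pages and kernel vmap for the device again.
+ */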
+static int
+panfrost_gem_prime_end_cpu_access(struct dma_buf *dma_buf,
+				  enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+	struct dma_buf_attachment *attach;
+
+	dma_resv_lock(obj->resv, NULL);
+	list_for_each_entry(attach, &dma_buf->attachments, node) {
+		struct sg_table *sgt = attach->priv;
+
+		if (sgt)
+			dma_sync_sgtable_for_device(attach->dev, sgt, dir);
+	}
+
+	if (shmem->vaddr)
+		flush_kernel_vmap_range(shmem->vaddr, shmem->base.size);
+
+	if (shmem->sgt)
+		dma_sync_sgtable_for_device(dev->dev, shmem->sgt, dir);
+
+	dma_resv_unlock(obj->resv);
+	return 0;
+}
+
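+/*
+ * dma_buf_ops built on the generic GEM PRIME helpers, extended with
+ * CPU-access hooks for cache maintenance.
+ */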
+static const struct dma_buf_ops panfrost_dma_buf_ops = {
+	.attach = drm_gem_map_attach,
+	.detach = drm_gem_map_detach,
+	.map_dma_buf = panfrost_gem_prime_map_dma_buf,
+	.unmap_dma_buf = panfrost_gem_prime_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+	.begin_cpu_access = panfrost_gem_prime_begin_cpu_access,
+	.end_cpu_access = panfrost_gem_prime_end_cpu_access,
+};
+
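+/*
+ * Mirrors drm_gem_prime_export(), but installs panfrost_dma_buf_ops so the
+ * CPU-access hooks above are used for buffers we export.
+ */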
+static struct dma_buf *
+panfrost_gem_prime_export(struct drm_gem_object *obj, int flags)
+{
+	struct drm_device *dev = obj->dev;
+	struct dma_buf_export_info exp_info = {
+		.exp_name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+		.ops = &panfrost_dma_buf_ops,
+		.size = obj->size,
+		.flags = flags,
+		.priv = obj,
+		.resv = obj->resv,
+	};
+
+	return drm_gem_dmabuf_export(dev, &exp_info);
+}
+
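+/*
+ * The self-import fast path in drm_gem_prime_import() only matches buffers
+ * exported with the generic dma_buf_ops, so handle self-imports of
+ * panfrost-exported buffers here.
+ */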
+struct drm_gem_object *
+panfrost_gem_prime_import(struct drm_device *dev,
+			  struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (dma_buf->ops == &panfrost_dma_buf_ops && obj->dev == dev) {
+		/* Importing a dmabuf exported from our own GEM object takes
+		 * a reference on the GEM object itself instead of on the
+		 * dmabuf's f_count.
+		 */
+		drm_gem_object_get(obj);
+		return obj;
+	}
+
+	return drm_gem_prime_import(dev, dma_buf);
+}
+
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
+	.export = panfrost_gem_prime_export,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,