/*
 * ivpu_gem_create_object() - allocate a driver-private GEM object.
 * @dev:  DRM device the object belongs to.
 * @size: requested backing size in bytes; must be non-zero and page-aligned.
 *
 * Return: pointer to the embedded &drm_gem_object on success,
 *         ERR_PTR(-EINVAL) for a zero/unaligned size,
 *         ERR_PTR(-ENOMEM) on allocation failure.
 *
 * NOTE(review): the original text contained leftover unified-diff markers and
 * dereferenced 'bo' without ever allocating it. Reconstructed to the post-patch
 * state implied by the '-' lines: the vdev->bo_list insertion and the debug
 * print are removed from this constructor (they were moved to the callers),
 * only per-BO initialization remains. Allocation via kzalloc() matches the
 * upstream driver — TODO confirm against the full file.
 */
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size)
{
	struct ivpu_bo *bo;

	/* Reject degenerate sizes early; GEM backing must be whole pages. */
	if (size == 0 || !PAGE_ALIGNED(size))
		return ERR_PTR(-EINVAL);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialize the list node so the BO can later be safely added to
	 * (and removed from) vdev->bo_list by its caller.
	 */
	INIT_LIST_HEAD(&bo->bo_list_node);

	return &bo->base.base;
}
/*
 * ivpu_gem_prime_import() - import a dma-buf as an ivpu GEM object and
 * register it on the device-wide BO list under vdev->bo_list_lock.
 *
 * NOTE(review): this span is corrupted diff residue, not compilable C:
 *  - lines still carry leading '+' unified-diff markers;
 *  - 'obj' is used before any assignment — the call that creates the GEM
 *    object from the attachment is missing, as are the IS_ERR() checks on
 *    dma_buf_attach() and the error 'goto fail_detach' paths;
 *  - everything after 'fail_detach:' belongs to a DIFFERENT function
 *    (apparently ivpu_bo_alloc(): 'flags', 'size', 'return bo;' are not
 *    declared here) — two hunks were fused into one body.
 * Code left byte-identical; reconcile against the original patch.
 */
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
struct device *attach_dev = dev->dev;
struct dma_buf_attachment *attach;
struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
int ret;
/* NOTE(review): missing IS_ERR(attach) check and get_dma_buf() here. */
attach = dma_buf_attach(dma_buf, attach_dev);
/* NOTE(review): 'obj' is never assigned before these writes — the
 * GEM-object creation call from the original function is missing. */
obj->import_attach = attach;
obj->resv = dma_buf->resv;
+ bo = to_ivpu_bo(obj);
+
/* Register the imported BO for device-wide tracking/debugfs. */
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
return obj;
/* NOTE(review): fail_detach should unwind with dma_buf_detach()/
 * dma_buf_put() and return ERR_PTR(ret); the lines below are a fused
 * fragment of another function's tail (likely ivpu_bo_alloc()). */
fail_detach:
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
/* NOTE(review): '%9llu' mismatches a size_t argument — should be '%9zu'
 * (see the import path above); UB per printk format rules. */
+ ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
+
return bo;
}