if (!drm_vma_node_is_allowed(node, priv)) {
drm_gem_object_put(obj);
- return -EACCES;
+ return ERR_PTR(-EACCES);
}
- ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
+ return obj;
+ }
+
+ #ifdef CONFIG_MMU
+ /**
+ * drm_gem_get_unmapped_area - memory mapping region search routine for GEM objects
+ * @filp: DRM file pointer
+ * @uaddr: User address hint
+ * @len: Mapping length
+ * @pgoff: Offset (in pages)
+ * @flags: Mapping flags
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will first try to find a free linear address range large enough
+ * for the mapping, before ending up in drm_gem_mmap(). Since GEM objects are
+ * backed by shmem buffers, this should preferably be handled by the shmem
+ * virtual memory filesystem, which can align addresses to huge page
+ * boundaries when needed.
+ *
+ * Look up the GEM object based on the offset passed in (@pgoff will contain
+ * the fake offset we created) and call the backing shmem file's
+ * get_unmapped_area() handler with the right file pointer.
+ *
+ * If a GEM object is not available at the given offset or if the caller is not
+ * granted access to it, fall back to mm_get_unmapped_area().
+ */
+ unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+ {
+ struct drm_gem_object *obj;
+ unsigned long ret;
+
+ obj = drm_gem_object_lookup_at_offset(filp, pgoff, len >> PAGE_SHIFT);
+ if (IS_ERR(obj) || !obj->filp || !obj->filp->f_op->get_unmapped_area)
+ return mm_get_unmapped_area(current->mm, filp, uaddr, len, 0,
+ flags);
+
+ ret = obj->filp->f_op->get_unmapped_area(obj->filp, uaddr, len, 0,
+ flags);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(drm_gem_get_unmapped_area);
+ #endif
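
For a driver to benefit from this, drm_gem_get_unmapped_area() has to be wired
into its DRM file_operations next to drm_gem_mmap(). A minimal sketch of what
that wiring could look like (illustrative only, not part of this patch;
"example_gem_fops" is a made-up name):

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <drm/drm_file.h>
	#include <drm/drm_gem.h>
	#include <drm/drm_ioctl.h>

	static const struct file_operations example_gem_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.compat_ioctl	= drm_compat_ioctl,
		.poll		= drm_poll,
		.read		= drm_read,
		.llseek		= noop_llseek,
		.mmap		= drm_gem_mmap,
	#ifdef CONFIG_MMU
		/* Let shmem pick a huge-page-aligned address before drm_gem_mmap() runs. */
		.get_unmapped_area = drm_gem_get_unmapped_area,
	#endif
	};

Drivers built around DEFINE_DRM_GEM_FOPS() would presumably pick the hook up
through that macro instead, but that is outside this hunk.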
+
+ /**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created) and map it with a call to
+ * drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
+ */
+ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+ {
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup_at_offset(filp, vma->vm_pgoff,
+ vma_pages(vma));
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_mmap_obj(obj,
+ drm_vma_node_size(&obj->vma_node) << PAGE_SHIFT,
vma);
drm_gem_object_put(obj);
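
For context, the fake offset both helpers key off is handed to userspace by the
kernel (for dumb buffers via DRM_IOCTL_MODE_MAP_DUMB) and passed back as the
mmap() offset, which is how it ends up in @pgoff / vma->vm_pgoff. A rough
userspace sketch using the libdrm drmIoctl() wrapper; "map_dumb_buffer" is a
hypothetical helper and drm_fd, handle and size are assumed to come from
earlier setup:

	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>

	static void *map_dumb_buffer(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_mode_map_dumb map = { .handle = handle };

		/* Ask the kernel for the fake mmap offset of this buffer. */
		if (drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return MAP_FAILED;

		/* map.offset is the fake offset; it reaches the kernel as
		 * vma->vm_pgoff (in pages). */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    drm_fd, map.offset);
	}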
{
struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
struct panthor_vm *vm = priv;
- int ret;
-
- ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
- unmap_vma->base.va.range);
- if (drm_WARN_ON(&vm->ptdev->base, ret))
- return ret;
+ panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
+ unmap_vma->base.va.range);
drm_gpuva_unmap(&op->unmap);
- panthor_vma_unlink(vm, unmap_vma);
+ panthor_vma_unlink(unmap_vma);
return 0;
}