// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

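/*
 * Count the number of DMA address chunks referenced by a mapped scatterlist.
 * Entries with a zero sg_dma_len() are skipped because their DMA address is
 * not valid.
 */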
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

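/*
 * Pin the buffer for use by a device: allocate a host1x_bo_mapping that
 * describes how the buffer is mapped for DMA. Imported dma-bufs are attached
 * and mapped through the dma-buf API; natively allocated buffers get an SG
 * table that is mapped with the DMA API here.
 */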
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

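/*
 * Undo tegra_bo_pin(): unmap and detach imported dma-bufs, or unmap and free
 * the SG table for native buffers, then drop the buffer reference.
 */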
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

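/*
 * Return a kernel virtual address for the buffer: the existing mapping if one
 * is available, a dma-buf vmap for imports, or a fresh write-combined vmap()
 * of the pages. Returns an ERR_PTR() on failure, never NULL.
 */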
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->gem.import_attach)
		return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);

	vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

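/*
 * Reserve a range of I/O virtual addresses in the Tegra DRM address space and
 * map the buffer's SG table into the shared IOMMU domain.
 */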
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

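/*
 * Allocate and initialize the GEM and host1x parts of a buffer object. The
 * backing storage is allocated separately by the callers.
 */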
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

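/*
 * Allocate shmem-backed pages for the buffer, build an SG table for them and
 * map the table for DMA.
 */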
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

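/*
 * Allocate backing storage: discontiguous pages mapped through the IOMMU when
 * an IOMMU domain is available, otherwise contiguous write-combined memory
 * from the DMA API.
 */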
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

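/*
 * Create a buffer object of the given size and apply the tiling and layout
 * flags requested by userspace.
 */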
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

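/*
 * Create a buffer object and a userspace handle for it. The handle keeps the
 * object alive; the creation reference is dropped before returning.
 */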
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

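/*
 * Wrap a foreign dma-buf in a tegra_bo: attach to the dma-buf, map it for DMA
 * and, if an IOMMU domain is available, map it into the Tegra DRM address
 * space.
 */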
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

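/*
 * Destructor for tegra_bo objects: drop any cached host1x mappings, undo the
 * IOMMU mapping and release either the imported dma-buf or the locally
 * allocated storage.
 */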
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

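/*
 * Fault handler for page-backed buffers mapped into userspace. Contiguous
 * (DMA API) buffers are mapped up front in __tegra_gem_mmap() and should
 * never fault.
 */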
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

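/*
 * Set up a VMA for CPU access to the buffer: contiguous buffers are mapped in
 * one go using dma_mmap_wc(), while page-backed buffers are populated lazily
 * through tegra_bo_fault().
 */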
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

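/*
 * Export a GEM object as a dma-buf using the driver's dma-buf operations.
 */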
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

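/*
 * Resolve a userspace GEM handle to the host1x_bo used by the job submission
 * code. Returns NULL if the handle does not refer to a valid GEM object.
 */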
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}