drivers/gpu/drm/msm/msm_gem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}
static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
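/*
 * Illustrative note (not part of the original file): the carveout node is
 * allocated in units of pages, so for a hypothetical carveout at
 * priv->vram.paddr = 0x80000000 and a node with start = 0x10, physaddr()
 * yields (0x10 << PAGE_SHIFT) + 0x80000000 = 0x80010000 with 4K pages, and
 * the loop above then fills p[] with one struct page per 4K step of paddr.
 */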
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}
static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					     msm_obj->sgt->nents,
					     DMA_BIDIRECTIONAL);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);

	return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file  = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we dont need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}
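/*
 * Illustrative note (not part of the original file): the "fake offset" is the
 * per-object cookie handed out by the DRM vma manager, used only so mmap() on
 * the DRM fd can be routed to this object. Assuming 4K pages, a fault at
 * vma->vm_start + 0x3000 therefore resolves to pages[3] above, regardless of
 * what vmf->pgoff (which still encodes the fake offset) contains.
 */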
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);

	return offset;
}
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}
static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}
static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}
/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_purge_vma(vma->aspace, vma);
		msm_gem_close_vma(vma->aspace, vma);
		del_vma(vma);
	}
}
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}
static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}
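/*
 * Illustrative usage sketch (not part of the original file); "aspace" and the
 * error handling are placeholders:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... program the GPU/display with "iova" ...
 *	msm_gem_unpin_iova(obj, aspace);	// the matching put
 */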
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova);
	mutex_unlock(&msm_obj->lock);

	return ret;
}
/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}
/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put_unlocked(obj);

fail:
	return ret;
}
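/*
 * Illustrative userspace flow (not part of the original file): after
 * DRM_IOCTL_MODE_CREATE_DUMB returns a handle, DRM_IOCTL_MODE_MAP_DUMB lands
 * here and returns the fake offset, which is then used as the mmap() offset
 * on the DRM fd:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 */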
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}
void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}
/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}
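/*
 * Illustrative usage sketch (not part of the original file): CPU access pairs
 * msm_gem_get_vaddr() with msm_gem_put_vaddr() so vmap_count stays balanced
 * for the shrinker; "data"/"len" below are placeholders:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */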
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}
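/*
 * Illustrative note (not part of the original file): a caller handling a
 * hypothetical userspace madvise request would turn the return value into
 * "retained or not", e.g.:
 *
 *	ret = msm_gem_madvise(obj, MSM_MADV_WILLNEED);
 *	if (ret > 0)
 *		... still backed, contents preserved ...
 *	else
 *		... already purged, caller must regenerate contents ...
 */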
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}
static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(obj->resv, fence);
	else
		reservation_object_add_shared_fence(obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list)
			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
				vma->aspace != NULL ? vma->aspace->name : NULL,
				vma->iova, vma->mapped ? "mapped" : "unmapped",
				vma->inuse);

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* don't call directly!  Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}
static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}
void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		const char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return ret;
}
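/*
 * Illustrative usage sketch (not part of the original file), mirroring the
 * dumb-buffer path above: allocate a BO and hand the handle straight back to
 * userspace, with the debug name showing up in the debugfs listing; "len" and
 * "my-buffer" are placeholders:
 *
 *	uint32_t handle;
 *	ret = msm_gem_new_handle(dev, file, PAGE_ALIGN(len),
 *				 MSM_BO_WC, &handle, "my-buffer");
 */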
static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv)
		msm_obj->base.resv = resv;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}
static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_put_unlocked(obj);
	return ERR_PTR(ret);
}
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put(obj);
	else
		drm_gem_object_put_unlocked(obj);

	return ERR_PTR(ret);
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put(bo);
	else
		drm_gem_object_put_unlocked(bo);
}
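/*
 * Illustrative usage sketch (not part of the original file): a kernel-internal
 * buffer (e.g. a hypothetical ring or firmware buffer) pairs
 * msm_gem_kernel_new() with msm_gem_kernel_put():
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */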
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}
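/*
 * Illustrative usage sketch (not part of the original file): callers typically
 * label BOs right after allocation so the debugfs listing above is readable,
 * e.g.
 *
 *	msm_gem_object_set_name(bo, "ring%d", id);
 *
 * where "ring%d" and "id" are placeholders for whatever the caller allocates.
 */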