1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
8 #include <linux/dma-mapping.h>
9 #include <linux/fault-inject.h>
10 #include <linux/of_address.h>
11 #include <linux/uaccess.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_file.h>
15 #include <drm/drm_ioctl.h>
16 #include <drm/drm_of.h>
19 #include "msm_debugfs.h"
21 #include "adreno/adreno_gpu.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 */
#define MSM_VERSION_MAJOR	1
/* Bumped to 11 to match the 1.11.0 (wait boost) entry above; advertising
 * 1.10 would hide the wait-boost feature from userspace probing.
 */
#define MSM_VERSION_MINOR	11
#define MSM_VERSION_PATCHLEVEL	0
45 static void msm_deinit_vram(struct drm_device
*ddev
);
47 static char *vram
= "16m";
48 MODULE_PARM_DESC(vram
, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
49 module_param(vram
, charp
, 0);
52 MODULE_PARM_DESC(dumpstate
, "Dump KMS state on errors");
53 module_param(dumpstate
, bool, 0600);
55 static bool modeset
= true;
56 MODULE_PARM_DESC(modeset
, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
57 module_param(modeset
, bool, 0600);
59 #ifdef CONFIG_FAULT_INJECTION
60 DECLARE_FAULT_ATTR(fail_gem_alloc
);
61 DECLARE_FAULT_ATTR(fail_gem_iova
);
/*
 * Tear down everything msm_drm_init() set up, in reverse order: unregister
 * the DRM device, shut down KMS hardware, flush workers, release shrinker,
 * debugfs, KMS, VRAM, and finally unbind all aggregate components.
 * NOTE(review): the function braces and final return are not visible in
 * this extract.
 */
static int msm_drm_uninit(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	flush_workqueue(priv->wq);

	msm_gem_shrinker_cleanup(ddev);

	/* Both debugfs cleanups are no-ops when the files were never created. */
	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	msm_drm_kms_uninit(dev);

	msm_deinit_vram(ddev);

	component_unbind_all(dev, ddev);

	/* Components are gone; make stale priv lookups fail fast. */
	ddev->dev_private = NULL;

	destroy_workqueue(priv->wq);
110 bool msm_use_mmu(struct drm_device
*dev
)
112 struct msm_drm_private
*priv
= dev
->dev_private
;
115 * a2xx comes with its own MMU
116 * On other platforms IOMMU can be declared specified either for the
117 * MDP/DPU device or for its parent, MDSS device.
119 return priv
->is_a2xx
||
120 device_iommu_mapped(dev
->dev
) ||
121 device_iommu_mapped(dev
->dev
->parent
);
/*
 * Set up the VRAM carveout (if any): size it from the DT "memory-region"
 * phandle or, for no-IOMMU devices, from the "vram" module parameter, then
 * allocate the whole region via the DMA API.
 * NOTE(review): several branches (the "if (node)" body, "if (size)" guard,
 * alloc-failure return and function braces) are not visible in this extract.
 */
static int msm_init_vram(struct drm_device *dev)
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 * 1) device with no IOMMU, in which case we need exclusive
	 *    access to a VRAM carveout big enough for all gpu
	 *    buffers
	 * 2) device with IOMMU, but where the bootloader puts up
	 *    a splash screen.  In this case, the VRAM carveout
	 *    need only be large enough for fbdev fb.  But we need
	 *    exclusive access to the buffer to avoid the kernel
	 *    using those pages for other purposes (which appears
	 *    as corruption on screen before we have a chance to
	 *    load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
		ret = of_address_to_resource(node, 0, &r);
		/* Resource is inclusive, hence the +1 for the byte count. */
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire DMA chunk carved out in early startup:
	 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);

		unsigned long attrs = 0;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				    &priv->vram.paddr, GFP_KERNEL, attrs);
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			/* Zero paddr marks "no carveout" for msm_deinit_vram(). */
			priv->vram.paddr = 0;

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
			     (uint32_t)priv->vram.paddr,
			     (uint32_t)(priv->vram.paddr + size));
198 static void msm_deinit_vram(struct drm_device
*ddev
)
200 struct msm_drm_private
*priv
= ddev
->dev_private
;
201 unsigned long attrs
= DMA_ATTR_NO_KERNEL_MAPPING
;
203 if (!priv
->vram
.paddr
)
206 drm_mm_takedown(&priv
->vram
.mm
);
207 dma_free_attrs(ddev
->dev
, priv
->vram
.size
, NULL
, priv
->vram
.paddr
,
/*
 * Bring up the DRM device: allocate it, initialize GEM bookkeeping and
 * LRUs, set up VRAM, bind all aggregate components, init the shrinker and
 * (optionally) KMS, then register with DRM core.
 * NOTE(review): error checks, goto labels and braces are only partially
 * visible in this extract.
 */
static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;

	/* Respect the "firmware drivers only" (e.g. nomodeset) kernel flag. */
	if (drm_firmware_drivers_only())

	ddev = drm_dev_alloc(drv, dev);
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	ddev->dev_private = priv;

	/* Ordered workqueue serializes vblank/unpin work. */
	priv->wq = alloc_ordered_workqueue("msm", 0);

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);

	ret = msm_init_vram(ddev);

	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
		goto err_deinit_vram;

	ret = msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		ddev->driver_features &= ~DRIVER_MODESET;
		ddev->driver_features &= ~DRIVER_ATOMIC;

	ret = drm_dev_register(ddev, 0);

	ret = msm_debugfs_late_init(ddev);

	drm_kms_helper_poll_init(ddev);

	if (priv->kms_init) {
		drm_kms_helper_poll_init(ddev);
		msm_fbdev_setup(ddev);

	/* Error unwind path: */
	msm_deinit_vram(ddev);
	destroy_workqueue(priv->wq);
319 static void load_gpu(struct drm_device
*dev
)
321 static DEFINE_MUTEX(init_lock
);
322 struct msm_drm_private
*priv
= dev
->dev_private
;
324 mutex_lock(&init_lock
);
327 priv
->gpu
= adreno_load_gpu(dev
);
329 mutex_unlock(&init_lock
);
332 static int context_init(struct drm_device
*dev
, struct drm_file
*file
)
334 static atomic_t ident
= ATOMIC_INIT(0);
335 struct msm_drm_private
*priv
= dev
->dev_private
;
336 struct msm_file_private
*ctx
;
338 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
342 INIT_LIST_HEAD(&ctx
->submitqueues
);
343 rwlock_init(&ctx
->queuelock
);
345 kref_init(&ctx
->ref
);
346 msm_submitqueue_init(dev
, ctx
);
348 ctx
->aspace
= msm_gpu_create_private_address_space(priv
->gpu
, current
);
349 file
->driver_priv
= ctx
;
351 ctx
->seqno
= atomic_inc_return(&ident
);
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}
/* Retire the context's submitqueues, then drop our kref on the context. */
static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}
372 static void msm_postclose(struct drm_device
*dev
, struct drm_file
*file
)
374 struct msm_drm_private
*priv
= dev
->dev_private
;
375 struct msm_file_private
*ctx
= file
->driver_priv
;
378 * It is not possible to set sysprof param to non-zero if gpu
379 * is not initialized:
382 msm_file_private_set_sysprof(ctx
, priv
->gpu
, 0);
391 static int msm_ioctl_get_param(struct drm_device
*dev
, void *data
,
392 struct drm_file
*file
)
394 struct msm_drm_private
*priv
= dev
->dev_private
;
395 struct drm_msm_param
*args
= data
;
398 /* for now, we just have 3d pipe.. eventually this would need to
399 * be more clever to dispatch to appropriate gpu module:
401 if ((args
->pipe
!= MSM_PIPE_3D0
) || (args
->pad
!= 0))
409 return gpu
->funcs
->get_param(gpu
, file
->driver_priv
,
410 args
->param
, &args
->value
, &args
->len
);
413 static int msm_ioctl_set_param(struct drm_device
*dev
, void *data
,
414 struct drm_file
*file
)
416 struct msm_drm_private
*priv
= dev
->dev_private
;
417 struct drm_msm_param
*args
= data
;
420 if ((args
->pipe
!= MSM_PIPE_3D0
) || (args
->pad
!= 0))
428 return gpu
->funcs
->set_param(gpu
, file
->driver_priv
,
429 args
->param
, args
->value
, args
->len
);
432 static int msm_ioctl_gem_new(struct drm_device
*dev
, void *data
,
433 struct drm_file
*file
)
435 struct drm_msm_gem_new
*args
= data
;
436 uint32_t flags
= args
->flags
;
438 if (args
->flags
& ~MSM_BO_FLAGS
) {
439 DRM_ERROR("invalid flags: %08x\n", args
->flags
);
444 * Uncached CPU mappings are deprecated, as of:
446 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
448 * So promote them to WC.
450 if (flags
& MSM_BO_UNCACHED
) {
451 flags
&= ~MSM_BO_CACHED
;
455 if (should_fail(&fail_gem_alloc
, args
->size
))
458 return msm_gem_new_handle(dev
, file
, args
->size
,
459 args
->flags
, &args
->handle
, NULL
);
462 static inline ktime_t
to_ktime(struct drm_msm_timespec timeout
)
464 return ktime_set(timeout
.tv_sec
, timeout
.tv_nsec
);
467 static int msm_ioctl_gem_cpu_prep(struct drm_device
*dev
, void *data
,
468 struct drm_file
*file
)
470 struct drm_msm_gem_cpu_prep
*args
= data
;
471 struct drm_gem_object
*obj
;
472 ktime_t timeout
= to_ktime(args
->timeout
);
475 if (args
->op
& ~MSM_PREP_FLAGS
) {
476 DRM_ERROR("invalid op: %08x\n", args
->op
);
480 obj
= drm_gem_object_lookup(file
, args
->handle
);
484 ret
= msm_gem_cpu_prep(obj
, args
->op
, &timeout
);
486 drm_gem_object_put(obj
);
491 static int msm_ioctl_gem_cpu_fini(struct drm_device
*dev
, void *data
,
492 struct drm_file
*file
)
494 struct drm_msm_gem_cpu_fini
*args
= data
;
495 struct drm_gem_object
*obj
;
498 obj
= drm_gem_object_lookup(file
, args
->handle
);
502 ret
= msm_gem_cpu_fini(obj
);
504 drm_gem_object_put(obj
);
509 static int msm_ioctl_gem_info_iova(struct drm_device
*dev
,
510 struct drm_file
*file
, struct drm_gem_object
*obj
,
513 struct msm_drm_private
*priv
= dev
->dev_private
;
514 struct msm_file_private
*ctx
= file
->driver_priv
;
519 if (should_fail(&fail_gem_iova
, obj
->size
))
523 * Don't pin the memory here - just get an address so that userspace can
526 return msm_gem_get_iova(obj
, ctx
->aspace
, iova
);
529 static int msm_ioctl_gem_info_set_iova(struct drm_device
*dev
,
530 struct drm_file
*file
, struct drm_gem_object
*obj
,
533 struct msm_drm_private
*priv
= dev
->dev_private
;
534 struct msm_file_private
*ctx
= file
->driver_priv
;
539 /* Only supported if per-process address space is supported: */
540 if (priv
->gpu
->aspace
== ctx
->aspace
)
543 if (should_fail(&fail_gem_iova
, obj
->size
))
546 return msm_gem_set_iova(obj
, ctx
->aspace
, iova
);
/*
 * MSM_GEM_INFO ioctl: multiplexed get/set of per-BO info (mmap offset,
 * iova, flags, debug name) selected by args->info.
 * NOTE(review): break statements, several error returns and the function
 * braces are not visible in this extract.
 */
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;

	/* First pass: validate args->info / args->len combination. */
	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:

	obj = drm_gem_object_lookup(file, args->handle);

	msm_obj = to_msm_bo(obj);

	/* Second pass: perform the requested operation. */
	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
	case MSM_INFO_GET_FLAGS:
		/* imported (prime) buffers don't carry msm-local flags */
		if (obj->import_attach) {
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
			msm_obj->name[0] = '\0';
		msm_obj->name[args->len] = '\0';
		/* Truncate at the first non-printable character. */
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
	case MSM_INFO_GET_NAME:
		/* value==0 is a pure length query; otherwise buffer must fit. */
		if (args->value && (args->len < strlen(msm_obj->name))) {
		args->len = strlen(msm_obj->name);
		if (copy_to_user(u64_to_user_ptr(args->value),
				 msm_obj->name, args->len))

	drm_gem_object_put(obj);
/*
 * Wait for a submitqueue-scoped fence id to signal, with timeout and
 * optional deadline boost.  An id newer than the queue's last submitted
 * fence is invalid; an id no longer in the idr was already retired.
 * NOTE(review): some returns, the "if (fence)" guard around the wait, and
 * function braces are not visible in this extract.
 */
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		ktime_t timeout, uint32_t flags)
	struct dma_fence *fence;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
		/* take a reference under the lock so retire can't free it */
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	} else if (ret != -ERESTARTSYS) {

	dma_fence_put(fence);
684 static int msm_ioctl_wait_fence(struct drm_device
*dev
, void *data
,
685 struct drm_file
*file
)
687 struct msm_drm_private
*priv
= dev
->dev_private
;
688 struct drm_msm_wait_fence
*args
= data
;
689 struct msm_gpu_submitqueue
*queue
;
692 if (args
->flags
& ~MSM_WAIT_FENCE_FLAGS
) {
693 DRM_ERROR("invalid flags: %08x\n", args
->flags
);
700 queue
= msm_submitqueue_get(file
->driver_priv
, args
->queueid
);
704 ret
= wait_fence(queue
, args
->fence
, to_ktime(args
->timeout
), args
->flags
);
706 msm_submitqueue_put(queue
);
711 static int msm_ioctl_gem_madvise(struct drm_device
*dev
, void *data
,
712 struct drm_file
*file
)
714 struct drm_msm_gem_madvise
*args
= data
;
715 struct drm_gem_object
*obj
;
718 switch (args
->madv
) {
719 case MSM_MADV_DONTNEED
:
720 case MSM_MADV_WILLNEED
:
726 obj
= drm_gem_object_lookup(file
, args
->handle
);
731 ret
= msm_gem_madvise(obj
, args
->madv
);
733 args
->retained
= ret
;
737 drm_gem_object_put(obj
);
743 static int msm_ioctl_submitqueue_new(struct drm_device
*dev
, void *data
,
744 struct drm_file
*file
)
746 struct drm_msm_submitqueue
*args
= data
;
748 if (args
->flags
& ~MSM_SUBMITQUEUE_FLAGS
)
751 return msm_submitqueue_create(dev
, file
->driver_priv
, args
->prio
,
752 args
->flags
, &args
->id
);
755 static int msm_ioctl_submitqueue_query(struct drm_device
*dev
, void *data
,
756 struct drm_file
*file
)
758 return msm_submitqueue_query(dev
, file
->driver_priv
, data
);
761 static int msm_ioctl_submitqueue_close(struct drm_device
*dev
, void *data
,
762 struct drm_file
*file
)
764 u32 id
= *(u32
*) data
;
766 return msm_submitqueue_remove(file
->driver_priv
, id
);
/* UAPI ioctl dispatch table; every entry is permitted on render nodes. */
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
784 static void msm_show_fdinfo(struct drm_printer
*p
, struct drm_file
*file
)
786 struct drm_device
*dev
= file
->minor
->dev
;
787 struct msm_drm_private
*priv
= dev
->dev_private
;
792 msm_gpu_show_fdinfo(priv
->gpu
, file
->driver_priv
, p
);
794 drm_show_memory_stats(p
, file
);
/*
 * File operations for the /dev/dri/* nodes; DRM core supplies the
 * open/mmap/ioctl plumbing.  NOTE(review): the GEM fops initializer and
 * closing brace are not visible in this extract.
 */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.show_fdinfo = drm_show_fdinfo,
/*
 * Top-level DRM driver description: feature bits, file hooks, dumb-buffer
 * and prime helpers, ioctl table and the uapi version advertised to
 * userspace.  NOTE(review): several initializers (feature OR-chain tail,
 * open callback, fops pointer, name field, #endif) are not visible in this
 * extract.
 */
static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
	.postclose          = msm_postclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.show_fdinfo        = msm_show_fdinfo,
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.desc               = "MSM Snapdragon DRM",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 * NOTE(review): error returns, the LVDS "continue" path and function braces
 * are not visible in this extract.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;

		ret = of_graph_parse_endpoint(ep_node, &ep);
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			/* drop the iterator's reference before bailing out */
			of_node_put(ep_node);

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);
885 * We don't know what's the best binding to link the gpu with the drm device.
886 * Fow now, we just hunt for all the possible gpus that we support, and add them
889 static const struct of_device_id msm_gpu_match
[] = {
890 { .compatible
= "qcom,adreno" },
891 { .compatible
= "qcom,adreno-3xx" },
892 { .compatible
= "amd,imageon" },
893 { .compatible
= "qcom,kgsl-3d0" },
897 static int add_gpu_components(struct device
*dev
,
898 struct component_match
**matchptr
)
900 struct device_node
*np
;
902 np
= of_find_matching_node(NULL
, msm_gpu_match
);
906 if (of_device_is_available(np
))
907 drm_of_component_match_add(dev
, matchptr
, component_compare_of
, np
);
914 static int msm_drm_bind(struct device
*dev
)
916 return msm_drm_init(dev
, &msm_driver
);
/* Aggregate device is going away: tear down the DRM device. */
static void msm_drm_unbind(struct device *dev)
{
	/* Restored: without this call the device is never torn down on unbind. */
	msm_drm_uninit(dev);
}
/* Component-master callbacks: entry points for the aggregate device. */
const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
/*
 * Common probe entry used by both KMS and headless instances: allocate the
 * driver-private data, collect the component match list (MDP endpoints when
 * KMS is present, plus any GPU node) and register the aggregate driver.
 * NOTE(review): the third parameter, error checks and function braces are
 * not visible in this extract.
 */
int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev),
	struct msm_drm_private *priv;
	struct component_match *match = NULL;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);

	/* kms_init == NULL selects the headless (GPU-only) mode */
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
		ret = add_components_mdp(master_dev, &match);

	ret = add_gpu_components(master_dev, &match);

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
972 * Used only for headlesss GPU instances
975 static int msm_pdev_probe(struct platform_device
*pdev
)
977 return msm_drv_probe(&pdev
->dev
, NULL
, NULL
);
980 static void msm_pdev_remove(struct platform_device
*pdev
)
982 component_master_del(&pdev
->dev
, &msm_drm_ops
);
/*
 * Platform driver for the headless (GPU-only) instance.
 * NOTE(review): the .driver sub-struct and closing brace are not visible
 * in this extract.
 */
static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove_new = msm_pdev_remove,
993 static int __init
msm_drm_register(void)
1002 msm_hdmi_register();
1005 msm_mdp4_register();
1006 msm_mdss_register();
1007 return platform_driver_register(&msm_platform_driver
);
/* Module exit: unregister everything in reverse of msm_drm_register(). */
static void __exit msm_drm_unregister(void)
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
1024 module_init(msm_drm_register
);
1025 module_exit(msm_drm_unregister
);
1027 MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
1028 MODULE_DESCRIPTION("MSM DRM Driver");
1029 MODULE_LICENSE("GPL");