// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_kms.h"
#include "adreno/adreno_gpu.h"

/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl.
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	11
#define MSM_VERSION_PATCHLEVEL	0

static void msm_deinit_vram(struct drm_device *ddev);

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
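
/*
 * The size string is parsed with memparse() (see msm_init_vram() below),
 * so the usual k/m/g suffixes work; e.g., assuming the driver is built
 * as a module named msm, booting with msm.vram=32m on the kernel command
 * line selects a 32 MiB carveout.
 */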

bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);
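
/*
 * Note that modeset=0 does not select a KMS-less mode of operation: as
 * msm_drm_register() at the bottom of this file shows, module init
 * simply fails with -EINVAL when modeset is disabled.
 */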

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
#endif
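
/*
 * The fault attrs above are exposed through debugfs (wired up in
 * msm_debugfs.c), so GEM allocation and iova-assignment failures can be
 * injected at runtime via the standard fault-injection knobs, e.g.
 * under /sys/kernel/debug/dri/<minor>/fail_gem_alloc/.
 */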

static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shut down the hw if we're far enough along that things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		if (priv->kms)
			drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and clean up any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */

	flush_workqueue(priv->wq);

	msm_gem_shrinker_cleanup(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	if (priv->kms)
		msm_drm_kms_uninit(dev);

	msm_deinit_vram(ddev);

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}

bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}

static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
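
	/*
	 * Illustrative sketch only (node and label names are made up, not
	 * taken from a real dts) of the shape of binding parsed below:
	 *
	 *	gpu_carveout: carveout-region {
	 *		reg = <...>;
	 *	};
	 *
	 *	mdp {
	 *		memory-region = <&gpu_carveout>;
	 *	};
	 */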

	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;
		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

		/* if we have no IOMMU, then we need to use carveout allocator.
		 * Grab the entire DMA chunk carved out in early startup in
		 * mach-msm:
		 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static void msm_deinit_vram(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	if (!priv->vram.paddr)
		return;

	drm_mm_takedown(&priv->vram.mm);
	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
		       attrs);
}

static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);

	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);

	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);
		if (ret)
			goto err_destroy_wq;
	}

	ret = msm_init_vram(ddev);
	if (ret)
		goto err_destroy_wq;

	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_deinit_vram;

	ret = msm_gem_shrinker_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
		if (ret)
			goto err_msm_uninit;
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		ddev->driver_features &= ~DRIVER_MODESET;
		ddev->driver_features &= ~DRIVER_ATOMIC;
	}

	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_msm_uninit;

	ret = msm_debugfs_late_init(ddev);
	if (ret)
		goto err_msm_uninit;

	if (priv->kms_init) {
		drm_kms_helper_poll_init(ddev);
		msm_fbdev_setup(ddev);
	}

	return 0;

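	/*
	 * Note: msm_drm_uninit() tears down everything set up above
	 * (including the workqueue and the drm_device reference), so the
	 * err_msm_uninit path is only taken once component_bind_all() has
	 * succeeded; earlier failures unwind through the finer-grained
	 * labels below.
	 */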
err_msm_uninit:
	msm_drm_uninit(dev);

	return ret;

err_deinit_vram:
	msm_deinit_vram(ddev);
err_destroy_wq:
	destroy_workqueue(priv->wq);
err_put_dev:
	drm_dev_put(ddev);

	return ret;
}

/*
 * DRM operations:
 */

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}

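/*
 * Note on ctx->aspace: msm_gpu_create_private_address_space() is
 * expected to fall back to a reference to the GPU's single global
 * address space when per-process pagetables are not supported; the
 * "priv->gpu->aspace == ctx->aspace" test in
 * msm_ioctl_gem_info_set_iova() below relies on that.
 */
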
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}

/*
 * DRM ioctls:
 */

static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}

static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	if (args->pad)
		return -EINVAL;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		ret = 0;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;

	if (flags & MSM_WAIT_FENCE_BOOST)
		dma_fence_set_deadline(fence, ktime_get());

	ret = dma_fence_wait_timeout(fence, true, timeout_to_jiffies(&timeout));
	if (ret == 0) {
		ret = -ETIMEDOUT;
	} else if (ret != -ERESTARTSYS) {
		ret = 0;
	}

	dma_fence_put(fence);

	return ret;
}

static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
				      args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}

static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};

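/*
 * DRM fdinfo support: the per-file stats printed below surface through
 * the standard DRM fdinfo mechanism, i.e. they can be read from
 * /proc/<pid>/fdinfo/<fd> for any process holding the DRM fd (this is,
 * for example, what gputop-style tools consume).
 */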
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};

static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	.show_fdinfo        = msm_show_fdinfo,
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.date               = "20130625",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};

/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}

/*
 * We don't know what the best binding to link the gpu with the drm device is.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};

static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev),
		  struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms = kms;
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* On all devices that I am aware of, IOMMUs that can map
	 * any address the CPU can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}

/*
 * Platform driver:
 * Used only for headless GPU instances
 */

static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL, NULL);
}

static void msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove_new = msm_pdev_remove,
	.driver     = {
		.name = "msm",
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	DBG("init");
	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");