/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/sync_file.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Cmdstream submission:
 */

/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

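/*
 * Allocate the submit object in a single allocation, with the bo and cmd
 * tables placed inline after the struct.  nr_bos/nr_cmds come straight
 * from userspace, so the SIZE_MAX check guards against size_t overflow on
 * 32-bit, and __GFP_NOWARN|__GFP_NORETRY is presumably there so an
 * oversized request fails cleanly instead of thrashing reclaim.
 */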
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
		uint32_t nr_bos, uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->pid = get_pid(task_pid(current));
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->prio];

	/* initially, until copy_from_user() and bo lookup succeed: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}

void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	kfree(submit);
}

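/*
 * Fast-path user copy for use with pagefaults disabled: returns nonzero
 * instead of faulting if the page is not resident, so the caller can
 * drop its locks and retry with a regular copy_from_user().
 */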
static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user_inatomic(to, from, n);
	return -EFAULT;
}

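/*
 * Copy the bo table in from userspace and resolve each GEM handle to an
 * object reference.  The whole lookup runs under a single table_lock with
 * pagefaults disabled; if the inatomic copy faults, the lock is dropped,
 * the copy retried with copy_from_user(), and the lock re-taken.
 */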
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);
	pagefault_disable();

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
			pagefault_enable();
			spin_unlock(&file->table_lock);
			if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
				ret = -EFAULT;
				goto out;
			}
			spin_lock(&file->table_lock);
			pagefault_disable();
		}

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	pagefault_enable();
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}

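/*
 * Drop the iova pin and/or reservation lock held on a single bo, per its
 * BO_PINNED/BO_LOCKED flags.  With 'backoff' set (the lock-retry path),
 * also scrub a presumed iova that was never validated.
 */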
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
		int i, bool backoff)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (backoff && !(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}

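/*
 * Locking follows the ww_mutex wound/wait protocol: if we lose a race on
 * any bo's reservation (-EDEADLK), every lock already held is dropped,
 * the contended lock is taken in the slow path, and the whole loop
 * retries with the same acquire ticket.
 */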
/* This is where we make sure all the bo's are reserved (locked); pinning
 * happens later, in submit_pin_objects():
 */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

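/*
 * Hook up fencing for each bo: always make room for our own fence in the
 * reservation object for read-only bo's, and, unless the submit opted out
 * of implicit sync, order this submit after fences from other contexts
 * already attached to the bo.
 */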
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = reservation_object_reserve_shared(msm_obj->resv);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
			write);
		if (ret)
			break;
	}

	return ret;
}

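/*
 * Pin each bo into the GPU address space and compare the resulting iova
 * against the one userspace presumed; if any differ, the submit as a
 * whole is marked not valid and the relocs must be processed.
 */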
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}

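/*
 * Bounds-checked accessor for an entry in the submit's bo table; any of
 * the out-params may be NULL if the caller doesn't need them.
 */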
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}

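/*
 * Unwind per-bo state (locks/pins, submit-list membership, and the lookup
 * reference) and release the ww acquire ticket.  Called on both the
 * success and error paths of the submit ioctl.
 */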
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i, false);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_put(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}

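/*
 * Main entry point for the SUBMIT ioctl: validates the request, builds
 * the submit, locks/pins the bo's, processes relocs, and hands the
 * cmdstream to the GPU, optionally importing/exporting sync_file fences.
 */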
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct sync_file *sync_file = NULL;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	int out_fence_fd = -1;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;

	/* for now we just have the 3d pipe.. eventually this will need to be
	 * more clever to dispatch to the appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	ring = gpu->rb[queue->prio];

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, ring->fctx->context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(ring->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	submit_cleanup(submit);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}