1 /*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29 #include <linux/dma_remapping.h>
30 #include <linux/reservation.h>
31 #include <linux/uaccess.h>
32
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35
36 #include "i915_drv.h"
37 #include "i915_gem_dmabuf.h"
38 #include "i915_trace.h"
39 #include "intel_drv.h"
40 #include "intel_frontbuffer.h"
41
42 #define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */
43
44 #define __EXEC_OBJECT_HAS_PIN (1<<31)
45 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
46 #define __EXEC_OBJECT_NEEDS_MAP (1<<29)
47 #define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
48 #define __EXEC_OBJECT_INTERNAL_FLAGS (0xf<<28) /* all of the above */
49
50 #define BATCH_OFFSET_BIAS (256*1024)
51
52 struct i915_execbuffer_params {
53 struct drm_device *dev;
54 struct drm_file *file;
55 struct i915_vma *batch;
56 u32 dispatch_flags;
57 u32 args_batch_start_offset;
58 struct intel_engine_cs *engine;
59 struct i915_gem_context *ctx;
60 struct drm_i915_gem_request *request;
61 };
62
63 struct eb_vmas {
64 struct drm_i915_private *i915;
65 struct list_head vmas;
66 int and;
67 union {
68 struct i915_vma *lut[0];
69 struct hlist_head buckets[0];
70 };
71 };
72
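/*
 * The execbuffer object lookup comes in two flavours: with
 * I915_EXEC_HANDLE_LUT userspace promises that the buffer indices are the
 * handles, so a flat lut[] indexed by position is enough and eb->and holds
 * the negated buffer count; otherwise a small hash table of hlist buckets
 * is allocated and eb->and is the bucket mask (see eb_get_vma()).
 */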
73 static struct eb_vmas *
74 eb_create(struct drm_i915_private *i915,
75 struct drm_i915_gem_execbuffer2 *args)
76 {
77 struct eb_vmas *eb = NULL;
78
79 if (args->flags & I915_EXEC_HANDLE_LUT) {
80 unsigned size = args->buffer_count;
81 size *= sizeof(struct i915_vma *);
82 size += sizeof(struct eb_vmas);
83 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
84 }
85
86 if (eb == NULL) {
87 unsigned size = args->buffer_count;
88 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
89 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
90 while (count > 2*size)
91 count >>= 1;
92 eb = kzalloc(count*sizeof(struct hlist_head) +
93 sizeof(struct eb_vmas),
94 GFP_TEMPORARY);
95 if (eb == NULL)
96 return eb;
97
98 eb->and = count - 1;
99 } else
100 eb->and = -args->buffer_count;
101
102 eb->i915 = i915;
103 INIT_LIST_HEAD(&eb->vmas);
104 return eb;
105 }
106
107 static void
108 eb_reset(struct eb_vmas *eb)
109 {
110 if (eb->and >= 0)
111 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
112 }
113
114 static struct i915_vma *
115 eb_get_batch(struct eb_vmas *eb)
116 {
117 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
118
119 /*
120 * SNA is doing fancy tricks with compressing batch buffers, which leads
121 * to negative relocation deltas. Usually that works out ok since the
122 * relocate address is still positive, except when the batch is placed
123 * very low in the GTT. Ensure this doesn't happen.
124 *
125 * Note that actual hangs have only been observed on gen7, but for
126 * paranoia do it everywhere.
127 */
128 if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
129 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
130
131 return vma;
132 }
133
134 static int
135 eb_lookup_vmas(struct eb_vmas *eb,
136 struct drm_i915_gem_exec_object2 *exec,
137 const struct drm_i915_gem_execbuffer2 *args,
138 struct i915_address_space *vm,
139 struct drm_file *file)
140 {
141 struct drm_i915_gem_object *obj;
142 struct list_head objects;
143 int i, ret;
144
145 INIT_LIST_HEAD(&objects);
146 spin_lock(&file->table_lock);
147 /* Grab a reference to the object and release the lock so we can look up
148 * or create the VMA without using GFP_ATOMIC */
149 for (i = 0; i < args->buffer_count; i++) {
150 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
151 if (obj == NULL) {
152 spin_unlock(&file->table_lock);
153 DRM_DEBUG("Invalid object handle %d at index %d\n",
154 exec[i].handle, i);
155 ret = -ENOENT;
156 goto err;
157 }
158
159 if (!list_empty(&obj->obj_exec_link)) {
160 spin_unlock(&file->table_lock);
161 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
162 obj, exec[i].handle, i);
163 ret = -EINVAL;
164 goto err;
165 }
166
167 i915_gem_object_get(obj);
168 list_add_tail(&obj->obj_exec_link, &objects);
169 }
170 spin_unlock(&file->table_lock);
171
172 i = 0;
173 while (!list_empty(&objects)) {
174 struct i915_vma *vma;
175
176 obj = list_first_entry(&objects,
177 struct drm_i915_gem_object,
178 obj_exec_link);
179
180 /*
181 * NOTE: We can leak any vmas created here when something fails
182 * later on. But that's no issue since vma_unbind can deal with
183 * vmas which are not actually bound. And since only
184 * lookup_or_create exists as an interface to get at the vma
185 * from the (obj, vm) pair, we don't run the risk of creating
186 * duplicated vmas for the same vm.
187 */
188 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
189 if (unlikely(IS_ERR(vma))) {
190 DRM_DEBUG("Failed to lookup VMA\n");
191 ret = PTR_ERR(vma);
192 goto err;
193 }
194
195 /* Transfer ownership from the objects list to the vmas list. */
196 list_add_tail(&vma->exec_list, &eb->vmas);
197 list_del_init(&obj->obj_exec_link);
198
199 vma->exec_entry = &exec[i];
200 if (eb->and < 0) {
201 eb->lut[i] = vma;
202 } else {
203 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
204 vma->exec_handle = handle;
205 hlist_add_head(&vma->exec_node,
206 &eb->buckets[handle & eb->and]);
207 }
208 ++i;
209 }
210
211 return 0;
212
213
214 err:
215 while (!list_empty(&objects)) {
216 obj = list_first_entry(&objects,
217 struct drm_i915_gem_object,
218 obj_exec_link);
219 list_del_init(&obj->obj_exec_link);
220 i915_gem_object_put(obj);
221 }
222 /*
223 * Objects already transferred to the vmas list will be unreferenced by
224 * eb_destroy.
225 */
226
227 return ret;
228 }
229
230 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
231 {
232 if (eb->and < 0) {
233 if (handle >= -eb->and)
234 return NULL;
235 return eb->lut[handle];
236 } else {
237 struct hlist_head *head;
238 struct i915_vma *vma;
239
240 head = &eb->buckets[handle & eb->and];
241 hlist_for_each_entry(vma, head, exec_node) {
242 if (vma->exec_handle == handle)
243 return vma;
244 }
245 return NULL;
246 }
247 }
248
249 static void
250 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
251 {
252 struct drm_i915_gem_exec_object2 *entry;
253
254 if (!drm_mm_node_allocated(&vma->node))
255 return;
256
257 entry = vma->exec_entry;
258
259 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
260 i915_vma_unpin_fence(vma);
261
262 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
263 __i915_vma_unpin(vma);
264
265 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
266 }
267
268 static void eb_destroy(struct eb_vmas *eb)
269 {
270 while (!list_empty(&eb->vmas)) {
271 struct i915_vma *vma;
272
273 vma = list_first_entry(&eb->vmas,
274 struct i915_vma,
275 exec_list);
276 list_del_init(&vma->exec_list);
277 i915_gem_execbuffer_unreserve_vma(vma);
278 i915_vma_put(vma);
279 }
280 kfree(eb);
281 }
282
283 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
284 {
285 if (!i915_gem_object_has_struct_page(obj))
286 return false;
287
288 if (DBG_USE_CPU_RELOC)
289 return DBG_USE_CPU_RELOC > 0;
290
291 return (HAS_LLC(obj->base.dev) ||
292 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
293 obj->cache_level != I915_CACHE_NONE);
294 }
295
296 /* Used to convert any address to canonical form.
297 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
298 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
299 * addresses to be in a canonical form:
300 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
301 * canonical form [63:48] == [47]."
302 */
303 #define GEN8_HIGH_ADDRESS_BIT 47
304 static inline uint64_t gen8_canonical_addr(uint64_t address)
305 {
306 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
307 }
308
309 static inline uint64_t gen8_noncanonical_addr(uint64_t address)
310 {
311 return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
312 }
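
/*
 * As an example of the two helpers above: 0x0000800000000000 has bit 47 set
 * and so is not canonical; gen8_canonical_addr() sign-extends bit 47 into
 * bits 63:48 to give 0xffff800000000000, and gen8_noncanonical_addr() masks
 * bits 63:48 back off to recover 0x0000800000000000.
 */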
313
314 static inline uint64_t
315 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
316 uint64_t target_offset)
317 {
318 return gen8_canonical_addr((int)reloc->delta + target_offset);
319 }
320
321 struct reloc_cache {
322 struct drm_i915_private *i915;
323 struct drm_mm_node node;
324 unsigned long vaddr;
325 unsigned int page;
326 bool use_64bit_reloc;
327 };
328
329 static void reloc_cache_init(struct reloc_cache *cache,
330 struct drm_i915_private *i915)
331 {
332 cache->page = -1;
333 cache->vaddr = 0;
334 cache->i915 = i915;
335 cache->use_64bit_reloc = INTEL_GEN(cache->i915) >= 8;
336 cache->node.allocated = false;
337 }
338
339 static inline void *unmask_page(unsigned long p)
340 {
341 return (void *)(uintptr_t)(p & PAGE_MASK);
342 }
343
344 static inline unsigned int unmask_flags(unsigned long p)
345 {
346 return p & ~PAGE_MASK;
347 }
348
349 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
350
351 static void reloc_cache_fini(struct reloc_cache *cache)
352 {
353 void *vaddr;
354
355 if (!cache->vaddr)
356 return;
357
358 vaddr = unmask_page(cache->vaddr);
359 if (cache->vaddr & KMAP) {
360 if (cache->vaddr & CLFLUSH_AFTER)
361 mb();
362
363 kunmap_atomic(vaddr);
364 i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
365 } else {
366 wmb();
367 io_mapping_unmap_atomic((void __iomem *)vaddr);
368 if (cache->node.allocated) {
369 struct i915_ggtt *ggtt = &cache->i915->ggtt;
370
371 ggtt->base.clear_range(&ggtt->base,
372 cache->node.start,
373 cache->node.size);
374 drm_mm_remove_node(&cache->node);
375 } else {
376 i915_vma_unpin((struct i915_vma *)cache->node.mm);
377 }
378 }
379 }
380
381 static void *reloc_kmap(struct drm_i915_gem_object *obj,
382 struct reloc_cache *cache,
383 int page)
384 {
385 void *vaddr;
386
387 if (cache->vaddr) {
388 kunmap_atomic(unmask_page(cache->vaddr));
389 } else {
390 unsigned int flushes;
391 int ret;
392
393 ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
394 if (ret)
395 return ERR_PTR(ret);
396
397 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
398 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
399
400 cache->vaddr = flushes | KMAP;
401 cache->node.mm = (void *)obj;
402 if (flushes)
403 mb();
404 }
405
406 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
407 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
408 cache->page = page;
409
410 return vaddr;
411 }
412
413 static void *reloc_iomap(struct drm_i915_gem_object *obj,
414 struct reloc_cache *cache,
415 int page)
416 {
417 struct i915_ggtt *ggtt = &cache->i915->ggtt;
418 unsigned long offset;
419 void *vaddr;
420
421 if (cache->node.allocated) {
422 wmb();
423 ggtt->base.insert_page(&ggtt->base,
424 i915_gem_object_get_dma_address(obj, page),
425 cache->node.start, I915_CACHE_NONE, 0);
426 cache->page = page;
427 return unmask_page(cache->vaddr);
428 }
429
430 if (cache->vaddr) {
431 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
432 } else {
433 struct i915_vma *vma;
434 int ret;
435
436 if (use_cpu_reloc(obj))
437 return NULL;
438
439 ret = i915_gem_object_set_to_gtt_domain(obj, true);
440 if (ret)
441 return ERR_PTR(ret);
442
443 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
444 PIN_MAPPABLE | PIN_NONBLOCK);
445 if (IS_ERR(vma)) {
446 memset(&cache->node, 0, sizeof(cache->node));
447 ret = drm_mm_insert_node_in_range_generic
448 (&ggtt->base.mm, &cache->node,
449 4096, 0, 0,
450 0, ggtt->mappable_end,
451 DRM_MM_SEARCH_DEFAULT,
452 DRM_MM_CREATE_DEFAULT);
453 if (ret) /* no inactive aperture space, use cpu reloc */
454 return NULL;
455 } else {
456 ret = i915_vma_put_fence(vma);
457 if (ret) {
458 i915_vma_unpin(vma);
459 return ERR_PTR(ret);
460 }
461
462 cache->node.start = vma->node.start;
463 cache->node.mm = (void *)vma;
464 }
465 }
466
467 offset = cache->node.start;
468 if (cache->node.allocated) {
469 ggtt->base.insert_page(&ggtt->base,
470 i915_gem_object_get_dma_address(obj, page),
471 offset, I915_CACHE_NONE, 0);
472 } else {
473 offset += page << PAGE_SHIFT;
474 }
475
476 vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
477 cache->page = page;
478 cache->vaddr = (unsigned long)vaddr;
479
480 return vaddr;
481 }
482
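/*
 * Pick a mapping for the page containing the relocation: reuse the cached
 * mapping if it covers the same page, otherwise prefer the GTT iomap (unless
 * we are already committed to kmap for this object) and fall back to an
 * atomic kmap of the shmem page.
 */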
483 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
484 struct reloc_cache *cache,
485 int page)
486 {
487 void *vaddr;
488
489 if (cache->page == page) {
490 vaddr = unmask_page(cache->vaddr);
491 } else {
492 vaddr = NULL;
493 if ((cache->vaddr & KMAP) == 0)
494 vaddr = reloc_iomap(obj, cache, page);
495 if (!vaddr)
496 vaddr = reloc_kmap(obj, cache, page);
497 }
498
499 return vaddr;
500 }
501
502 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
503 {
504 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
505 if (flushes & CLFLUSH_BEFORE) {
506 clflushopt(addr);
507 mb();
508 }
509
510 *addr = value;
511
512 /* Writes to the same cacheline are serialised by the CPU
513 * (including clflush). On the write path, we only require
514 * that it hits memory in an orderly fashion and place
515 * mb barriers at the start and end of the relocation phase
516 * to ensure ordering of clflush wrt the system.
517 */
518 if (flushes & CLFLUSH_AFTER)
519 clflushopt(addr);
520 } else
521 *addr = value;
522 }
523
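/*
 * On gen8+ a relocation is a full 64-bit address, but the backing object is
 * only mapped a page at a time, so the write is split into two 32-bit
 * halves: the lower dword is written first, then the offset is advanced by
 * four bytes and the upper dword written via the "repeat" loop below.
 */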
524 static int
525 relocate_entry(struct drm_i915_gem_object *obj,
526 const struct drm_i915_gem_relocation_entry *reloc,
527 struct reloc_cache *cache,
528 u64 target_offset)
529 {
530 u64 offset = reloc->offset;
531 bool wide = cache->use_64bit_reloc;
532 void *vaddr;
533
534 target_offset = relocation_target(reloc, target_offset);
535 repeat:
536 vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
537 if (IS_ERR(vaddr))
538 return PTR_ERR(vaddr);
539
540 clflush_write32(vaddr + offset_in_page(offset),
541 lower_32_bits(target_offset),
542 cache->vaddr);
543
544 if (wide) {
545 offset += sizeof(u32);
546 target_offset >>= 32;
547 wide = false;
548 goto repeat;
549 }
550
551 return 0;
552 }
553
554 static int
555 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
556 struct eb_vmas *eb,
557 struct drm_i915_gem_relocation_entry *reloc,
558 struct reloc_cache *cache)
559 {
560 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
561 struct drm_gem_object *target_obj;
562 struct drm_i915_gem_object *target_i915_obj;
563 struct i915_vma *target_vma;
564 uint64_t target_offset;
565 int ret;
566
567 /* we already hold a reference to all valid objects */
568 target_vma = eb_get_vma(eb, reloc->target_handle);
569 if (unlikely(target_vma == NULL))
570 return -ENOENT;
571 target_i915_obj = target_vma->obj;
572 target_obj = &target_vma->obj->base;
573
574 target_offset = gen8_canonical_addr(target_vma->node.start);
575
576 /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
577 * pipe_control writes because the gpu doesn't properly redirect them
578 * through the ppgtt for non-secure batchbuffers. */
579 if (unlikely(IS_GEN6(dev_priv) &&
580 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
581 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
582 PIN_GLOBAL);
583 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
584 return ret;
585 }
586
587 /* Validate that the target is in a valid r/w GPU domain */
588 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
589 DRM_DEBUG("reloc with multiple write domains: "
590 "obj %p target %d offset %d "
591 "read %08x write %08x",
592 obj, reloc->target_handle,
593 (int) reloc->offset,
594 reloc->read_domains,
595 reloc->write_domain);
596 return -EINVAL;
597 }
598 if (unlikely((reloc->write_domain | reloc->read_domains)
599 & ~I915_GEM_GPU_DOMAINS)) {
600 DRM_DEBUG("reloc with read/write non-GPU domains: "
601 "obj %p target %d offset %d "
602 "read %08x write %08x",
603 obj, reloc->target_handle,
604 (int) reloc->offset,
605 reloc->read_domains,
606 reloc->write_domain);
607 return -EINVAL;
608 }
609
610 target_obj->pending_read_domains |= reloc->read_domains;
611 target_obj->pending_write_domain |= reloc->write_domain;
612
613 /* If the relocation already has the right value in it, no
614 * more work needs to be done.
615 */
616 if (target_offset == reloc->presumed_offset)
617 return 0;
618
619 /* Check that the relocation address is valid... */
620 if (unlikely(reloc->offset >
621 obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
622 DRM_DEBUG("Relocation beyond object bounds: "
623 "obj %p target %d offset %d size %d.\n",
624 obj, reloc->target_handle,
625 (int) reloc->offset,
626 (int) obj->base.size);
627 return -EINVAL;
628 }
629 if (unlikely(reloc->offset & 3)) {
630 DRM_DEBUG("Relocation not 4-byte aligned: "
631 "obj %p target %d offset %d.\n",
632 obj, reloc->target_handle,
633 (int) reloc->offset);
634 return -EINVAL;
635 }
636
637 ret = relocate_entry(obj, reloc, cache, target_offset);
638 if (ret)
639 return ret;
640
641 /* and update the user's relocation entry */
642 reloc->presumed_offset = target_offset;
643 return 0;
644 }
645
646 static int
647 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
648 struct eb_vmas *eb)
649 {
650 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
651 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
652 struct drm_i915_gem_relocation_entry __user *user_relocs;
653 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
654 struct reloc_cache cache;
655 int remain, ret = 0;
656
657 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
658 reloc_cache_init(&cache, eb->i915);
659
660 remain = entry->relocation_count;
661 while (remain) {
662 struct drm_i915_gem_relocation_entry *r = stack_reloc;
663 unsigned long unwritten;
664 unsigned int count;
665
666 count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
667 remain -= count;
668
669 /* This is the fast path and we cannot handle a pagefault
670 * whilst holding the struct mutex lest the user pass in the
671 * relocations contained within a mmaped bo. In such a case,
672 * the page fault handler would call i915_gem_fault() and
673 * we would try to acquire the struct mutex again. Obviously
674 * this is bad and so lockdep complains vehemently.
675 */
676 pagefault_disable();
677 unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
678 pagefault_enable();
679 if (unlikely(unwritten)) {
680 ret = -EFAULT;
681 goto out;
682 }
683
684 do {
685 u64 offset = r->presumed_offset;
686
687 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
688 if (ret)
689 goto out;
690
691 if (r->presumed_offset != offset) {
692 pagefault_disable();
693 unwritten = __put_user(r->presumed_offset,
694 &user_relocs->presumed_offset);
695 pagefault_enable();
696 if (unlikely(unwritten)) {
697 /* Note that reporting an error now
698 * leaves everything in an inconsistent
699 * state as we have *already* changed
700 * the relocation value inside the
701 * object. As we have not changed the
702 * reloc.presumed_offset and will not
703 * change the execobject.offset, so on the
704 * next call we may not rewrite the value
705 * inside the object, leaving it
706 * dangling and causing a GPU hang.
707 */
708 ret = -EFAULT;
709 goto out;
710 }
711 }
712
713 user_relocs++;
714 r++;
715 } while (--count);
716 }
717
718 out:
719 reloc_cache_fini(&cache);
720 return ret;
721 #undef N_RELOC
722 }
723
724 static int
725 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
726 struct eb_vmas *eb,
727 struct drm_i915_gem_relocation_entry *relocs)
728 {
729 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
730 struct reloc_cache cache;
731 int i, ret = 0;
732
733 reloc_cache_init(&cache, eb->i915);
734 for (i = 0; i < entry->relocation_count; i++) {
735 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
736 if (ret)
737 break;
738 }
739 reloc_cache_fini(&cache);
740
741 return ret;
742 }
743
744 static int
745 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
746 {
747 struct i915_vma *vma;
748 int ret = 0;
749
750 list_for_each_entry(vma, &eb->vmas, exec_list) {
751 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
752 if (ret)
753 break;
754 }
755
756 return ret;
757 }
758
759 static bool only_mappable_for_reloc(unsigned int flags)
760 {
761 return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
762 __EXEC_OBJECT_NEEDS_MAP;
763 }
764
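/*
 * Pin a single execbuffer object into the address space, translating the
 * exec entry flags into pin flags: EXEC_OBJECT_NEEDS_GTT forces a global
 * binding, objects without the 48B flag are restricted to the low 4GiB,
 * mappable/bias/fixed-offset requests are honoured, and a fence register is
 * grabbed when EXEC_OBJECT_NEEDS_FENCE is set. If the final location differs
 * from the offset userspace presumed, relocations are flagged as needed.
 */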
765 static int
766 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
767 struct intel_engine_cs *engine,
768 bool *need_reloc)
769 {
770 struct drm_i915_gem_object *obj = vma->obj;
771 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
772 uint64_t flags;
773 int ret;
774
775 flags = PIN_USER;
776 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
777 flags |= PIN_GLOBAL;
778
779 if (!drm_mm_node_allocated(&vma->node)) {
780 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
781 * limit address to the first 4GBs for unflagged objects.
782 */
783 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
784 flags |= PIN_ZONE_4G;
785 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
786 flags |= PIN_GLOBAL | PIN_MAPPABLE;
787 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
788 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
789 if (entry->flags & EXEC_OBJECT_PINNED)
790 flags |= entry->offset | PIN_OFFSET_FIXED;
791 if ((flags & PIN_MAPPABLE) == 0)
792 flags |= PIN_HIGH;
793 }
794
795 ret = i915_vma_pin(vma,
796 entry->pad_to_size,
797 entry->alignment,
798 flags);
799 if ((ret == -ENOSPC || ret == -E2BIG) &&
800 only_mappable_for_reloc(entry->flags))
801 ret = i915_vma_pin(vma,
802 entry->pad_to_size,
803 entry->alignment,
804 flags & ~PIN_MAPPABLE);
805 if (ret)
806 return ret;
807
808 entry->flags |= __EXEC_OBJECT_HAS_PIN;
809
810 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
811 ret = i915_vma_get_fence(vma);
812 if (ret)
813 return ret;
814
815 if (i915_vma_pin_fence(vma))
816 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
817 }
818
819 if (entry->offset != vma->node.start) {
820 entry->offset = vma->node.start;
821 *need_reloc = true;
822 }
823
824 if (entry->flags & EXEC_OBJECT_WRITE) {
825 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
826 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
827 }
828
829 return 0;
830 }
831
832 static bool
833 need_reloc_mappable(struct i915_vma *vma)
834 {
835 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
836
837 if (entry->relocation_count == 0)
838 return false;
839
840 if (!i915_vma_is_ggtt(vma))
841 return false;
842
843 /* See also use_cpu_reloc() */
844 if (HAS_LLC(vma->obj->base.dev))
845 return false;
846
847 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
848 return false;
849
850 return true;
851 }
852
853 static bool
854 eb_vma_misplaced(struct i915_vma *vma)
855 {
856 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
857
858 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
859 !i915_vma_is_ggtt(vma));
860
861 if (entry->alignment &&
862 vma->node.start & (entry->alignment - 1))
863 return true;
864
865 if (vma->node.size < entry->pad_to_size)
866 return true;
867
868 if (entry->flags & EXEC_OBJECT_PINNED &&
869 vma->node.start != entry->offset)
870 return true;
871
872 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
873 vma->node.start < BATCH_OFFSET_BIAS)
874 return true;
875
876 /* avoid costly ping-pong once a batch bo ended up non-mappable */
877 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
878 !i915_vma_is_map_and_fenceable(vma))
879 return !only_mappable_for_reloc(entry->flags);
880
881 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
882 (vma->node.start + vma->node.size - 1) >> 32)
883 return true;
884
885 return false;
886 }
887
888 static int
889 i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
890 struct list_head *vmas,
891 struct i915_gem_context *ctx,
892 bool *need_relocs)
893 {
894 struct drm_i915_gem_object *obj;
895 struct i915_vma *vma;
896 struct i915_address_space *vm;
897 struct list_head ordered_vmas;
898 struct list_head pinned_vmas;
899 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
900 int retry;
901
902 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
903
904 INIT_LIST_HEAD(&ordered_vmas);
905 INIT_LIST_HEAD(&pinned_vmas);
906 while (!list_empty(vmas)) {
907 struct drm_i915_gem_exec_object2 *entry;
908 bool need_fence, need_mappable;
909
910 vma = list_first_entry(vmas, struct i915_vma, exec_list);
911 obj = vma->obj;
912 entry = vma->exec_entry;
913
914 if (ctx->flags & CONTEXT_NO_ZEROMAP)
915 entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
916
917 if (!has_fenced_gpu_access)
918 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
919 need_fence =
920 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
921 i915_gem_object_is_tiled(obj);
922 need_mappable = need_fence || need_reloc_mappable(vma);
923
924 if (entry->flags & EXEC_OBJECT_PINNED)
925 list_move_tail(&vma->exec_list, &pinned_vmas);
926 else if (need_mappable) {
927 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
928 list_move(&vma->exec_list, &ordered_vmas);
929 } else
930 list_move_tail(&vma->exec_list, &ordered_vmas);
931
932 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
933 obj->base.pending_write_domain = 0;
934 }
935 list_splice(&ordered_vmas, vmas);
936 list_splice(&pinned_vmas, vmas);
937
938 /* Attempt to pin all of the buffers into the GTT.
939 * This is done in 3 phases:
940 *
941 * 1a. Unbind all objects that do not match the GTT constraints for
942 * the execbuffer (fenceable, mappable, alignment etc).
943 * 1b. Increment pin count for already bound objects.
944 * 2. Bind new objects.
945 * 3. Decrement pin count.
946 *
947 * This avoids unnecessary unbinding of later objects in order to make
948 * room for the earlier objects *unless* we need to defragment.
949 */
950 retry = 0;
951 do {
952 int ret = 0;
953
954 /* Unbind any ill-fitting objects or pin. */
955 list_for_each_entry(vma, vmas, exec_list) {
956 if (!drm_mm_node_allocated(&vma->node))
957 continue;
958
959 if (eb_vma_misplaced(vma))
960 ret = i915_vma_unbind(vma);
961 else
962 ret = i915_gem_execbuffer_reserve_vma(vma,
963 engine,
964 need_relocs);
965 if (ret)
966 goto err;
967 }
968
969 /* Bind fresh objects */
970 list_for_each_entry(vma, vmas, exec_list) {
971 if (drm_mm_node_allocated(&vma->node))
972 continue;
973
974 ret = i915_gem_execbuffer_reserve_vma(vma, engine,
975 need_relocs);
976 if (ret)
977 goto err;
978 }
979
980 err:
981 if (ret != -ENOSPC || retry++)
982 return ret;
983
984 /* Decrement pin count for bound objects */
985 list_for_each_entry(vma, vmas, exec_list)
986 i915_gem_execbuffer_unreserve_vma(vma);
987
988 ret = i915_gem_evict_vm(vm, true);
989 if (ret)
990 return ret;
991 } while (1);
992 }
993
994 static int
995 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
996 struct drm_i915_gem_execbuffer2 *args,
997 struct drm_file *file,
998 struct intel_engine_cs *engine,
999 struct eb_vmas *eb,
1000 struct drm_i915_gem_exec_object2 *exec,
1001 struct i915_gem_context *ctx)
1002 {
1003 struct drm_i915_gem_relocation_entry *reloc;
1004 struct i915_address_space *vm;
1005 struct i915_vma *vma;
1006 bool need_relocs;
1007 int *reloc_offset;
1008 int i, total, ret;
1009 unsigned count = args->buffer_count;
1010
1011 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
1012
1013 /* We may process another execbuffer during the unlock... */
1014 while (!list_empty(&eb->vmas)) {
1015 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
1016 list_del_init(&vma->exec_list);
1017 i915_gem_execbuffer_unreserve_vma(vma);
1018 i915_vma_put(vma);
1019 }
1020
1021 mutex_unlock(&dev->struct_mutex);
1022
1023 total = 0;
1024 for (i = 0; i < count; i++)
1025 total += exec[i].relocation_count;
1026
1027 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
1028 reloc = drm_malloc_ab(total, sizeof(*reloc));
1029 if (reloc == NULL || reloc_offset == NULL) {
1030 drm_free_large(reloc);
1031 drm_free_large(reloc_offset);
1032 mutex_lock(&dev->struct_mutex);
1033 return -ENOMEM;
1034 }
1035
1036 total = 0;
1037 for (i = 0; i < count; i++) {
1038 struct drm_i915_gem_relocation_entry __user *user_relocs;
1039 u64 invalid_offset = (u64)-1;
1040 int j;
1041
1042 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
1043
1044 if (copy_from_user(reloc+total, user_relocs,
1045 exec[i].relocation_count * sizeof(*reloc))) {
1046 ret = -EFAULT;
1047 mutex_lock(&dev->struct_mutex);
1048 goto err;
1049 }
1050
1051 /* As we do not update the known relocation offsets after
1052 * relocating (due to the complexities in lock handling),
1053 * we need to mark them as invalid now so that we force the
1054 * relocation processing next time. Just in case the target
1055 * object is evicted and then rebound into its old
1056 * presumed_offset before the next execbuffer - if that
1057 * happened we would make the mistake of assuming that the
1058 * relocations were valid.
1059 */
1060 for (j = 0; j < exec[i].relocation_count; j++) {
1061 if (__copy_to_user(&user_relocs[j].presumed_offset,
1062 &invalid_offset,
1063 sizeof(invalid_offset))) {
1064 ret = -EFAULT;
1065 mutex_lock(&dev->struct_mutex);
1066 goto err;
1067 }
1068 }
1069
1070 reloc_offset[i] = total;
1071 total += exec[i].relocation_count;
1072 }
1073
1074 ret = i915_mutex_lock_interruptible(dev);
1075 if (ret) {
1076 mutex_lock(&dev->struct_mutex);
1077 goto err;
1078 }
1079
1080 /* reacquire the objects */
1081 eb_reset(eb);
1082 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1083 if (ret)
1084 goto err;
1085
1086 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1087 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1088 &need_relocs);
1089 if (ret)
1090 goto err;
1091
1092 list_for_each_entry(vma, &eb->vmas, exec_list) {
1093 int offset = vma->exec_entry - exec;
1094 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
1095 reloc + reloc_offset[offset]);
1096 if (ret)
1097 goto err;
1098 }
1099
1100 /* Leave the user relocations as they are; this is the painfully slow path,
1101 * and we want to avoid the complication of dropping the lock whilst
1102 * having buffers reserved in the aperture and so causing spurious
1103 * ENOSPC for random operations.
1104 */
1105
1106 err:
1107 drm_free_large(reloc);
1108 drm_free_large(reloc_offset);
1109 return ret;
1110 }
1111
1112 static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
1113 {
1114 unsigned int mask;
1115
1116 mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
1117 mask <<= I915_BO_ACTIVE_SHIFT;
1118
1119 return mask;
1120 }
1121
1122 static int
1123 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
1124 struct list_head *vmas)
1125 {
1126 const unsigned int other_rings = eb_other_engines(req);
1127 struct i915_vma *vma;
1128 int ret;
1129
1130 list_for_each_entry(vma, vmas, exec_list) {
1131 struct drm_i915_gem_object *obj = vma->obj;
1132 struct reservation_object *resv;
1133
1134 if (obj->flags & other_rings) {
1135 ret = i915_gem_request_await_object
1136 (req, obj, obj->base.pending_write_domain);
1137 if (ret)
1138 return ret;
1139 }
1140
1141 resv = i915_gem_object_get_dmabuf_resv(obj);
1142 if (resv) {
1143 ret = i915_sw_fence_await_reservation
1144 (&req->submit, resv, &i915_fence_ops,
1145 obj->base.pending_write_domain, 10*HZ,
1146 GFP_KERNEL | __GFP_NOWARN);
1147 if (ret < 0)
1148 return ret;
1149 }
1150
1151 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
1152 i915_gem_clflush_object(obj, false);
1153 }
1154
1155 /* Unconditionally flush any chipset caches (for streaming writes). */
1156 i915_gem_chipset_flush(req->engine->i915);
1157
1158 /* Unconditionally invalidate GPU caches and TLBs. */
1159 return req->engine->emit_flush(req, EMIT_INVALIDATE);
1160 }
1161
1162 static bool
1163 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1164 {
1165 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
1166 return false;
1167
1168 /* Kernel clipping was a DRI1 misfeature */
1169 if (exec->num_cliprects || exec->cliprects_ptr)
1170 return false;
1171
1172 if (exec->DR4 == 0xffffffff) {
1173 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1174 exec->DR4 = 0;
1175 }
1176 if (exec->DR1 || exec->DR4)
1177 return false;
1178
1179 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1180 return false;
1181
1182 return true;
1183 }
1184
1185 static int
1186 validate_exec_list(struct drm_device *dev,
1187 struct drm_i915_gem_exec_object2 *exec,
1188 int count)
1189 {
1190 unsigned relocs_total = 0;
1191 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
1192 unsigned invalid_flags;
1193 int i;
1194
1195 /* INTERNAL flags must not overlap with external ones */
1196 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);
1197
1198 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
1199 if (USES_FULL_PPGTT(dev))
1200 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
1201
1202 for (i = 0; i < count; i++) {
1203 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
1204 int length; /* limited by fault_in_pages_readable() */
1205
1206 if (exec[i].flags & invalid_flags)
1207 return -EINVAL;
1208
1209 /* Offset can be used as input (EXEC_OBJECT_PINNED), so reject
1210 * any non-page-aligned or non-canonical addresses.
1211 */
1212 if (exec[i].flags & EXEC_OBJECT_PINNED) {
1213 if (exec[i].offset !=
1214 gen8_canonical_addr(exec[i].offset & PAGE_MASK))
1215 return -EINVAL;
1216
1217 /* From the drm_mm perspective the address space is continuous,
1218 * so from this point on we always use the non-canonical
1219 * form internally.
1220 */
1221 exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
1222 }
1223
1224 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
1225 return -EINVAL;
1226
1227 /* pad_to_size was once a reserved field, so sanitize it */
1228 if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
1229 if (offset_in_page(exec[i].pad_to_size))
1230 return -EINVAL;
1231 } else {
1232 exec[i].pad_to_size = 0;
1233 }
1234
1235 /* First check for malicious input causing overflow in
1236 * the worst case where we need to allocate the entire
1237 * relocation tree as a single array.
1238 */
1239 if (exec[i].relocation_count > relocs_max - relocs_total)
1240 return -EINVAL;
1241 relocs_total += exec[i].relocation_count;
1242
1243 length = exec[i].relocation_count *
1244 sizeof(struct drm_i915_gem_relocation_entry);
1245 /*
1246 * We must check that the entire relocation array is safe
1247 * to read, but since we may need to update the presumed
1248 * offsets during execution, check for full write access.
1249 */
1250 if (!access_ok(VERIFY_WRITE, ptr, length))
1251 return -EFAULT;
1252
1253 if (likely(!i915.prefault_disable)) {
1254 if (fault_in_pages_readable(ptr, length))
1255 return -EFAULT;
1256 }
1257 }
1258
1259 return 0;
1260 }
1261
1262 static struct i915_gem_context *
1263 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1264 struct intel_engine_cs *engine, const u32 ctx_id)
1265 {
1266 struct i915_gem_context *ctx;
1267 struct i915_ctx_hang_stats *hs;
1268
1269 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1270 if (IS_ERR(ctx))
1271 return ctx;
1272
1273 hs = &ctx->hang_stats;
1274 if (hs->banned) {
1275 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1276 return ERR_PTR(-EIO);
1277 }
1278
1279 return ctx;
1280 }
1281
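/*
 * Mark a vma (and its object) as in use by the GPU for this request: the
 * object and vma are tagged active on the request's engine, write and fence
 * tracking is updated, and the vma is moved to the tail of its VM's active
 * list.
 */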
1282 void i915_vma_move_to_active(struct i915_vma *vma,
1283 struct drm_i915_gem_request *req,
1284 unsigned int flags)
1285 {
1286 struct drm_i915_gem_object *obj = vma->obj;
1287 const unsigned int idx = req->engine->id;
1288
1289 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1290
1291 obj->dirty = 1; /* be paranoid */
1292
1293 /* Add a reference if we're newly entering the active list.
1294 * The order in which we add operations to the retirement queue is
1295 * vital here: mark_active adds to the start of the callback list,
1296 * such that subsequent callbacks are called first. Therefore we
1297 * add the active reference first and queue for it to be dropped
1298 * *last*.
1299 */
1300 if (!i915_gem_object_is_active(obj))
1301 i915_gem_object_get(obj);
1302 i915_gem_object_set_active(obj, idx);
1303 i915_gem_active_set(&obj->last_read[idx], req);
1304
1305 if (flags & EXEC_OBJECT_WRITE) {
1306 i915_gem_active_set(&obj->last_write, req);
1307
1308 intel_fb_obj_invalidate(obj, ORIGIN_CS);
1309
1310 /* update for the implicit flush after a batch */
1311 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1312 }
1313
1314 if (flags & EXEC_OBJECT_NEEDS_FENCE)
1315 i915_gem_active_set(&vma->last_fence, req);
1316
1317 i915_vma_set_active(vma, idx);
1318 i915_gem_active_set(&vma->last_read[idx], req);
1319 list_move_tail(&vma->vm_link, &vma->vm->active_list);
1320 }
1321
1322 static void eb_export_fence(struct drm_i915_gem_object *obj,
1323 struct drm_i915_gem_request *req,
1324 unsigned int flags)
1325 {
1326 struct reservation_object *resv;
1327
1328 resv = i915_gem_object_get_dmabuf_resv(obj);
1329 if (!resv)
1330 return;
1331
1332 /* Ignore errors from failing to allocate the new fence; we can't
1333 * handle an error right now. Worst case should be missed
1334 * synchronisation leading to rendering corruption.
1335 */
1336 ww_mutex_lock(&resv->lock, NULL);
1337 if (flags & EXEC_OBJECT_WRITE)
1338 reservation_object_add_excl_fence(resv, &req->fence);
1339 else if (reservation_object_reserve_shared(resv) == 0)
1340 reservation_object_add_shared_fence(resv, &req->fence);
1341 ww_mutex_unlock(&resv->lock);
1342 }
1343
1344 static void
1345 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1346 struct drm_i915_gem_request *req)
1347 {
1348 struct i915_vma *vma;
1349
1350 list_for_each_entry(vma, vmas, exec_list) {
1351 struct drm_i915_gem_object *obj = vma->obj;
1352 u32 old_read = obj->base.read_domains;
1353 u32 old_write = obj->base.write_domain;
1354
1355 obj->base.write_domain = obj->base.pending_write_domain;
1356 if (obj->base.write_domain)
1357 vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
1358 else
1359 obj->base.pending_read_domains |= obj->base.read_domains;
1360 obj->base.read_domains = obj->base.pending_read_domains;
1361
1362 i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
1363 eb_export_fence(obj, req, vma->exec_entry->flags);
1364 trace_i915_gem_object_change_domain(obj, old_read, old_write);
1365 }
1366 }
1367
1368 static int
1369 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
1370 {
1371 struct intel_ring *ring = req->ring;
1372 int ret, i;
1373
1374 if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
1375 DRM_DEBUG("sol reset is gen7/rcs only\n");
1376 return -EINVAL;
1377 }
1378
1379 ret = intel_ring_begin(req, 4 * 3);
1380 if (ret)
1381 return ret;
1382
1383 for (i = 0; i < 4; i++) {
1384 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1385 intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
1386 intel_ring_emit(ring, 0);
1387 }
1388
1389 intel_ring_advance(ring);
1390
1391 return 0;
1392 }
1393
1394 static struct i915_vma *
1395 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
1396 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1397 struct drm_i915_gem_object *batch_obj,
1398 struct eb_vmas *eb,
1399 u32 batch_start_offset,
1400 u32 batch_len,
1401 bool is_master)
1402 {
1403 struct drm_i915_gem_object *shadow_batch_obj;
1404 struct i915_vma *vma;
1405 int ret;
1406
1407 shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
1408 PAGE_ALIGN(batch_len));
1409 if (IS_ERR(shadow_batch_obj))
1410 return ERR_CAST(shadow_batch_obj);
1411
1412 ret = intel_engine_cmd_parser(engine,
1413 batch_obj,
1414 shadow_batch_obj,
1415 batch_start_offset,
1416 batch_len,
1417 is_master);
1418 if (ret) {
1419 if (ret == -EACCES) /* unhandled chained batch */
1420 vma = NULL;
1421 else
1422 vma = ERR_PTR(ret);
1423 goto out;
1424 }
1425
1426 vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
1427 if (IS_ERR(vma))
1428 goto out;
1429
1430 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1431
1432 vma->exec_entry = shadow_exec_entry;
1433 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1434 i915_gem_object_get(shadow_batch_obj);
1435 list_add_tail(&vma->exec_list, &eb->vmas);
1436
1437 out:
1438 i915_gem_object_unpin_pages(shadow_batch_obj);
1439 return vma;
1440 }
1441
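/*
 * Emit the batch into the request: flush the objects to the GPU domain,
 * switch to the right context, reconcile the relative-constants INSTPM mode
 * (RCS only), optionally reset the gen7 SOL write offsets, then start the
 * batch via ->emit_bb_start() and mark all objects as active.
 */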
1442 static int
1443 execbuf_submit(struct i915_execbuffer_params *params,
1444 struct drm_i915_gem_execbuffer2 *args,
1445 struct list_head *vmas)
1446 {
1447 struct drm_i915_private *dev_priv = params->request->i915;
1448 u64 exec_start, exec_len;
1449 int instp_mode;
1450 u32 instp_mask;
1451 int ret;
1452
1453 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1454 if (ret)
1455 return ret;
1456
1457 ret = i915_switch_context(params->request);
1458 if (ret)
1459 return ret;
1460
1461 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1462 instp_mask = I915_EXEC_CONSTANTS_MASK;
1463 switch (instp_mode) {
1464 case I915_EXEC_CONSTANTS_REL_GENERAL:
1465 case I915_EXEC_CONSTANTS_ABSOLUTE:
1466 case I915_EXEC_CONSTANTS_REL_SURFACE:
1467 if (instp_mode != 0 && params->engine->id != RCS) {
1468 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1469 return -EINVAL;
1470 }
1471
1472 if (instp_mode != dev_priv->relative_constants_mode) {
1473 if (INTEL_INFO(dev_priv)->gen < 4) {
1474 DRM_DEBUG("no rel constants on pre-gen4\n");
1475 return -EINVAL;
1476 }
1477
1478 if (INTEL_INFO(dev_priv)->gen > 5 &&
1479 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1480 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1481 return -EINVAL;
1482 }
1483
1484 /* The HW changed the meaning on this bit on gen6 */
1485 if (INTEL_INFO(dev_priv)->gen >= 6)
1486 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1487 }
1488 break;
1489 default:
1490 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1491 return -EINVAL;
1492 }
1493
1494 if (params->engine->id == RCS &&
1495 instp_mode != dev_priv->relative_constants_mode) {
1496 struct intel_ring *ring = params->request->ring;
1497
1498 ret = intel_ring_begin(params->request, 4);
1499 if (ret)
1500 return ret;
1501
1502 intel_ring_emit(ring, MI_NOOP);
1503 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1504 intel_ring_emit_reg(ring, INSTPM);
1505 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1506 intel_ring_advance(ring);
1507
1508 dev_priv->relative_constants_mode = instp_mode;
1509 }
1510
1511 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1512 ret = i915_reset_gen7_sol_offsets(params->request);
1513 if (ret)
1514 return ret;
1515 }
1516
1517 exec_len = args->batch_len;
1518 exec_start = params->batch->node.start +
1519 params->args_batch_start_offset;
1520
1521 if (exec_len == 0)
1522 exec_len = params->batch->size - params->args_batch_start_offset;
1523
1524 ret = params->engine->emit_bb_start(params->request,
1525 exec_start, exec_len,
1526 params->dispatch_flags);
1527 if (ret)
1528 return ret;
1529
1530 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1531
1532 i915_gem_execbuffer_move_to_active(vmas, params->request);
1533
1534 return 0;
1535 }
1536
1537 /**
1538 * Find one BSD ring to dispatch the corresponding BSD command.
1539 * The engine index is returned.
1540 */
1541 static unsigned int
1542 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
1543 struct drm_file *file)
1544 {
1545 struct drm_i915_file_private *file_priv = file->driver_priv;
1546
1547 /* Check whether the file_priv has already selected one ring. */
1548 if ((int)file_priv->bsd_engine < 0)
1549 file_priv->bsd_engine = atomic_fetch_xor(1,
1550 &dev_priv->mm.bsd_engine_dispatch_index);
1551
1552 return file_priv->bsd_engine;
1553 }
1554
1555 #define I915_USER_RINGS (4)
1556
1557 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
1558 [I915_EXEC_DEFAULT] = RCS,
1559 [I915_EXEC_RENDER] = RCS,
1560 [I915_EXEC_BLT] = BCS,
1561 [I915_EXEC_BSD] = VCS,
1562 [I915_EXEC_VEBOX] = VECS
1563 };
1564
1565 static struct intel_engine_cs *
1566 eb_select_engine(struct drm_i915_private *dev_priv,
1567 struct drm_file *file,
1568 struct drm_i915_gem_execbuffer2 *args)
1569 {
1570 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
1571 struct intel_engine_cs *engine;
1572
1573 if (user_ring_id > I915_USER_RINGS) {
1574 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
1575 return NULL;
1576 }
1577
1578 if ((user_ring_id != I915_EXEC_BSD) &&
1579 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1580 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1581 "bsd dispatch flags: %d\n", (int)(args->flags));
1582 return NULL;
1583 }
1584
1585 if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
1586 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
1587
1588 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
1589 bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
1590 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
1591 bsd_idx <= I915_EXEC_BSD_RING2) {
1592 bsd_idx >>= I915_EXEC_BSD_SHIFT;
1593 bsd_idx--;
1594 } else {
1595 DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
1596 bsd_idx);
1597 return NULL;
1598 }
1599
1600 engine = dev_priv->engine[_VCS(bsd_idx)];
1601 } else {
1602 engine = dev_priv->engine[user_ring_map[user_ring_id]];
1603 }
1604
1605 if (!engine) {
1606 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
1607 return NULL;
1608 }
1609
1610 return engine;
1611 }
1612
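/*
 * Common execbuffer path shared by both ioctls: validate the arguments, pick
 * an engine and context, look up and reserve all objects in the (pp)GTT,
 * apply relocations (falling back to the slow path on -EFAULT), optionally
 * run the batch through the command parser, pin secure batches into the
 * GGTT, and finally allocate a request and submit it.
 */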
1613 static int
1614 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1615 struct drm_file *file,
1616 struct drm_i915_gem_execbuffer2 *args,
1617 struct drm_i915_gem_exec_object2 *exec)
1618 {
1619 struct drm_i915_private *dev_priv = to_i915(dev);
1620 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1621 struct eb_vmas *eb;
1622 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1623 struct intel_engine_cs *engine;
1624 struct i915_gem_context *ctx;
1625 struct i915_address_space *vm;
1626 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1627 struct i915_execbuffer_params *params = &params_master;
1628 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1629 u32 dispatch_flags;
1630 int ret;
1631 bool need_relocs;
1632
1633 if (!i915_gem_check_execbuffer(args))
1634 return -EINVAL;
1635
1636 ret = validate_exec_list(dev, exec, args->buffer_count);
1637 if (ret)
1638 return ret;
1639
1640 dispatch_flags = 0;
1641 if (args->flags & I915_EXEC_SECURE) {
1642 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1643 return -EPERM;
1644
1645 dispatch_flags |= I915_DISPATCH_SECURE;
1646 }
1647 if (args->flags & I915_EXEC_IS_PINNED)
1648 dispatch_flags |= I915_DISPATCH_PINNED;
1649
1650 engine = eb_select_engine(dev_priv, file, args);
1651 if (!engine)
1652 return -EINVAL;
1653
1654 if (args->buffer_count < 1) {
1655 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1656 return -EINVAL;
1657 }
1658
1659 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1660 if (!HAS_RESOURCE_STREAMER(dev)) {
1661 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1662 return -EINVAL;
1663 }
1664 if (engine->id != RCS) {
1665 DRM_DEBUG("RS is not available on %s\n",
1666 engine->name);
1667 return -EINVAL;
1668 }
1669
1670 dispatch_flags |= I915_DISPATCH_RS;
1671 }
1672
1673 /* Take a local wakeref for preparing to dispatch the execbuf as
1674 * we expect to access the hardware fairly frequently in the
1675 * process. Upon first dispatch, we acquire another prolonged
1676 * wakeref that we hold until the GPU has been idle for at least
1677 * 100ms.
1678 */
1679 intel_runtime_pm_get(dev_priv);
1680
1681 ret = i915_mutex_lock_interruptible(dev);
1682 if (ret)
1683 goto pre_mutex_err;
1684
1685 ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
1686 if (IS_ERR(ctx)) {
1687 mutex_unlock(&dev->struct_mutex);
1688 ret = PTR_ERR(ctx);
1689 goto pre_mutex_err;
1690 }
1691
1692 i915_gem_context_get(ctx);
1693
1694 if (ctx->ppgtt)
1695 vm = &ctx->ppgtt->base;
1696 else
1697 vm = &ggtt->base;
1698
1699 memset(&params_master, 0x00, sizeof(params_master));
1700
1701 eb = eb_create(dev_priv, args);
1702 if (eb == NULL) {
1703 i915_gem_context_put(ctx);
1704 mutex_unlock(&dev->struct_mutex);
1705 ret = -ENOMEM;
1706 goto pre_mutex_err;
1707 }
1708
1709 /* Look up object handles */
1710 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1711 if (ret)
1712 goto err;
1713
1714 /* take note of the batch buffer before we might reorder the lists */
1715 params->batch = eb_get_batch(eb);
1716
1717 /* Move the objects en-masse into the GTT, evicting if necessary. */
1718 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1719 ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
1720 &need_relocs);
1721 if (ret)
1722 goto err;
1723
1724 /* The objects are in their final locations, apply the relocations. */
1725 if (need_relocs)
1726 ret = i915_gem_execbuffer_relocate(eb);
1727 if (ret) {
1728 if (ret == -EFAULT) {
1729 ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
1730 engine,
1731 eb, exec, ctx);
1732 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1733 }
1734 if (ret)
1735 goto err;
1736 }
1737
1738 /* Set the pending read domains for the batch buffer to COMMAND */
1739 if (params->batch->obj->base.pending_write_domain) {
1740 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1741 ret = -EINVAL;
1742 goto err;
1743 }
1744 if (args->batch_start_offset > params->batch->size ||
1745 args->batch_len > params->batch->size - args->batch_start_offset) {
1746 DRM_DEBUG("Attempting to use out-of-bounds batch\n");
1747 ret = -EINVAL;
1748 goto err;
1749 }
1750
1751 params->args_batch_start_offset = args->batch_start_offset;
1752 if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
1753 struct i915_vma *vma;
1754
1755 vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
1756 params->batch->obj,
1757 eb,
1758 args->batch_start_offset,
1759 args->batch_len,
1760 drm_is_current_master(file));
1761 if (IS_ERR(vma)) {
1762 ret = PTR_ERR(vma);
1763 goto err;
1764 }
1765
1766 if (vma) {
1767 /*
1768 * Batch parsed and accepted:
1769 *
1770 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
1771 * bit from MI_BATCH_BUFFER_START commands issued in
1772 * the dispatch_execbuffer implementations. We
1773 * specifically don't want that set on batches the
1774 * command parser has accepted.
1775 */
1776 dispatch_flags |= I915_DISPATCH_SECURE;
1777 params->args_batch_start_offset = 0;
1778 params->batch = vma;
1779 }
1780 }
1781
1782 params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1783
1784 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1785 * batch" bit. Hence we need to pin secure batches into the global gtt.
1786 * hsw should have this fixed, but bdw mucks it up again. */
1787 if (dispatch_flags & I915_DISPATCH_SECURE) {
1788 struct drm_i915_gem_object *obj = params->batch->obj;
1789 struct i915_vma *vma;
1790
1791 /*
1792 * So on first glance it looks freaky that we pin the batch here
1793 * outside of the reservation loop. But:
1794 * - The batch is already pinned into the relevant ppgtt, so we
1795 * already have the backing storage fully allocated.
1796 * - No other BO uses the global gtt (well contexts, but meh),
1797 * so we don't really have issues with multiple objects not
1798 * fitting due to fragmentation.
1799 * So this is actually safe.
1800 */
1801 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
1802 if (IS_ERR(vma)) {
1803 ret = PTR_ERR(vma);
1804 goto err;
1805 }
1806
1807 params->batch = vma;
1808 }
1809
1810 /* Allocate a request for this batch buffer nice and early. */
1811 params->request = i915_gem_request_alloc(engine, ctx);
1812 if (IS_ERR(params->request)) {
1813 ret = PTR_ERR(params->request);
1814 goto err_batch_unpin;
1815 }
1816
1817 /* Whilst this request exists, batch_obj will be on the
1818 * active_list, and so will hold the active reference. Only when this
1819 * request is retired will the batch_obj be moved onto the
1820 * inactive_list and lose its active reference. Hence we do not need
1821 * to explicitly hold another reference here.
1822 */
1823 params->request->batch = params->batch;
1824
1825 ret = i915_gem_request_add_to_client(params->request, file);
1826 if (ret)
1827 goto err_request;
1828
1829 /*
1830 * Save assorted stuff away to pass through to *_submission().
1831 * NB: This data should be 'persistent' and not local as it will
1832 * be kept around beyond the duration of the IOCTL once the GPU
1833 * scheduler arrives.
1834 */
1835 params->dev = dev;
1836 params->file = file;
1837 params->engine = engine;
1838 params->dispatch_flags = dispatch_flags;
1839 params->ctx = ctx;
1840
1841 ret = execbuf_submit(params, args, &eb->vmas);
1842 err_request:
1843 __i915_add_request(params->request, ret == 0);
1844
1845 err_batch_unpin:
1846 /*
1847 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1848 * batch vma for correctness. To be less ugly and less fragile, this
1849 * needs to be adjusted to also track the ggtt batch vma properly as
1850 * active.
1851 */
1852 if (dispatch_flags & I915_DISPATCH_SECURE)
1853 i915_vma_unpin(params->batch);
1854 err:
1855 /* the request owns the ref now */
1856 i915_gem_context_put(ctx);
1857 eb_destroy(eb);
1858
1859 mutex_unlock(&dev->struct_mutex);
1860
1861 pre_mutex_err:
1862 /* intel_gpu_busy should also get a ref, so it will be released when the device
1863 * is really idle. */
1864 intel_runtime_pm_put(dev_priv);
1865 return ret;
1866 }
1867
1868 /*
1869 * Legacy execbuffer just creates an exec2 list from the original exec object
1870 * list array and passes it to the real function.
1871 */
1872 int
1873 i915_gem_execbuffer(struct drm_device *dev, void *data,
1874 struct drm_file *file)
1875 {
1876 struct drm_i915_gem_execbuffer *args = data;
1877 struct drm_i915_gem_execbuffer2 exec2;
1878 struct drm_i915_gem_exec_object *exec_list = NULL;
1879 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1880 int ret, i;
1881
1882 if (args->buffer_count < 1) {
1883 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1884 return -EINVAL;
1885 }
1886
1887 /* Copy in the exec list from userland */
1888 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1889 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1890 if (exec_list == NULL || exec2_list == NULL) {
1891 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1892 args->buffer_count);
1893 drm_free_large(exec_list);
1894 drm_free_large(exec2_list);
1895 return -ENOMEM;
1896 }
1897 ret = copy_from_user(exec_list,
1898 u64_to_user_ptr(args->buffers_ptr),
1899 sizeof(*exec_list) * args->buffer_count);
1900 if (ret != 0) {
1901 DRM_DEBUG("copy %d exec entries failed %d\n",
1902 args->buffer_count, ret);
1903 drm_free_large(exec_list);
1904 drm_free_large(exec2_list);
1905 return -EFAULT;
1906 }
1907
1908 for (i = 0; i < args->buffer_count; i++) {
1909 exec2_list[i].handle = exec_list[i].handle;
1910 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1911 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1912 exec2_list[i].alignment = exec_list[i].alignment;
1913 exec2_list[i].offset = exec_list[i].offset;
1914 if (INTEL_INFO(dev)->gen < 4)
1915 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1916 else
1917 exec2_list[i].flags = 0;
1918 }
1919
1920 exec2.buffers_ptr = args->buffers_ptr;
1921 exec2.buffer_count = args->buffer_count;
1922 exec2.batch_start_offset = args->batch_start_offset;
1923 exec2.batch_len = args->batch_len;
1924 exec2.DR1 = args->DR1;
1925 exec2.DR4 = args->DR4;
1926 exec2.num_cliprects = args->num_cliprects;
1927 exec2.cliprects_ptr = args->cliprects_ptr;
1928 exec2.flags = I915_EXEC_RENDER;
1929 i915_execbuffer2_set_context_id(exec2, 0);
1930
1931 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1932 if (!ret) {
1933 struct drm_i915_gem_exec_object __user *user_exec_list =
1934 u64_to_user_ptr(args->buffers_ptr);
1935
1936 /* Copy the new buffer offsets back to the user's exec list. */
1937 for (i = 0; i < args->buffer_count; i++) {
1938 exec2_list[i].offset =
1939 gen8_canonical_addr(exec2_list[i].offset);
1940 ret = __copy_to_user(&user_exec_list[i].offset,
1941 &exec2_list[i].offset,
1942 sizeof(user_exec_list[i].offset));
1943 if (ret) {
1944 ret = -EFAULT;
1945 DRM_DEBUG("failed to copy %d exec entries "
1946 "back to user (%d)\n",
1947 args->buffer_count, ret);
1948 break;
1949 }
1950 }
1951 }
1952
1953 drm_free_large(exec_list);
1954 drm_free_large(exec2_list);
1955 return ret;
1956 }
1957
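/*
 * For reference, a minimal userspace submission through this ioctl might
 * look roughly like the sketch below (illustrative only; "fd" and
 * "batch_handle" are assumed to be an open DRM fd and an existing GEM handle
 * holding a valid batch ending in MI_BATCH_BUFFER_END, "batch_bytes" its
 * length, a multiple of 8):
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = batch_handle,
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *
 * The last object in the buffer list is treated as the batch (see
 * eb_get_batch()), and the presumed offsets are copied back to userspace on
 * success.
 */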
1958 int
1959 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1960 struct drm_file *file)
1961 {
1962 struct drm_i915_gem_execbuffer2 *args = data;
1963 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1964 int ret;
1965
1966 if (args->buffer_count < 1 ||
1967 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1968 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1969 return -EINVAL;
1970 }
1971
1972 if (args->rsvd2 != 0) {
1973 DRM_DEBUG("dirty rvsd2 field\n");
1974 return -EINVAL;
1975 }
1976
1977 exec2_list = drm_malloc_gfp(args->buffer_count,
1978 sizeof(*exec2_list),
1979 GFP_TEMPORARY);
1980 if (exec2_list == NULL) {
1981 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1982 args->buffer_count);
1983 return -ENOMEM;
1984 }
1985 ret = copy_from_user(exec2_list,
1986 u64_to_user_ptr(args->buffers_ptr),
1987 sizeof(*exec2_list) * args->buffer_count);
1988 if (ret != 0) {
1989 DRM_DEBUG("copy %d exec entries failed %d\n",
1990 args->buffer_count, ret);
1991 drm_free_large(exec2_list);
1992 return -EFAULT;
1993 }
1994
1995 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1996 if (!ret) {
1997 /* Copy the new buffer offsets back to the user's exec list. */
1998 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1999 u64_to_user_ptr(args->buffers_ptr);
2000 int i;
2001
2002 for (i = 0; i < args->buffer_count; i++) {
2003 exec2_list[i].offset =
2004 gen8_canonical_addr(exec2_list[i].offset);
2005 ret = __copy_to_user(&user_exec_list[i].offset,
2006 &exec2_list[i].offset,
2007 sizeof(user_exec_list[i].offset));
2008 if (ret) {
2009 ret = -EFAULT;
2010 DRM_DEBUG("failed to copy %d exec entries "
2011 "back to user\n",
2012 args->buffer_count);
2013 break;
2014 }
2015 }
2016 }
2017
2018 drm_free_large(exec2_list);
2019 return ret;
2020 }