drivers/gpu/drm/i915/i915_gem_execbuffer.c
1 /*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29 #include <linux/intel-iommu.h>
30 #include <linux/reservation.h>
31 #include <linux/sync_file.h>
32 #include <linux/uaccess.h>
33
34 #include <drm/drm_syncobj.h>
35 #include <drm/i915_drm.h>
36
37 #include "i915_drv.h"
38 #include "i915_gem_clflush.h"
39 #include "i915_trace.h"
40 #include "intel_drv.h"
41 #include "intel_frontbuffer.h"
42
43 enum {
44 FORCE_CPU_RELOC = 1,
45 FORCE_GTT_RELOC,
46 FORCE_GPU_RELOC,
47 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
48 };
49
50 #define __EXEC_OBJECT_HAS_REF BIT(31)
51 #define __EXEC_OBJECT_HAS_PIN BIT(30)
52 #define __EXEC_OBJECT_HAS_FENCE BIT(29)
53 #define __EXEC_OBJECT_NEEDS_MAP BIT(28)
54 #define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
55 #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
56 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
57
58 #define __EXEC_HAS_RELOC BIT(31)
59 #define __EXEC_VALIDATED BIT(30)
60 #define __EXEC_INTERNAL_FLAGS (~0u << 30)
61 #define UPDATE PIN_OFFSET_FIXED
62
63 #define BATCH_OFFSET_BIAS (256*1024)
64
65 #define __I915_EXEC_ILLEGAL_FLAGS \
66 (__I915_EXEC_UNKNOWN_FLAGS | \
67 I915_EXEC_CONSTANTS_MASK | \
68 I915_EXEC_RESOURCE_STREAMER)
69
70 /* Catch emission of unexpected errors for CI! */
71 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
72 #undef EINVAL
73 #define EINVAL ({ \
74 DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
75 22; \
76 })
77 #endif
78
79 /**
80 * DOC: User command execution
81 *
82 * Userspace submits commands to be executed on the GPU as an instruction
83  * stream within a GEM object we call a batchbuffer. These instructions may
84 * refer to other GEM objects containing auxiliary state such as kernels,
85 * samplers, render targets and even secondary batchbuffers. Userspace does
86 * not know where in the GPU memory these objects reside and so before the
87 * batchbuffer is passed to the GPU for execution, those addresses in the
88 * batchbuffer and auxiliary objects are updated. This is known as relocation,
89 * or patching. To try and avoid having to relocate each object on the next
90 * execution, userspace is told the location of those objects in this pass,
91 * but this remains just a hint as the kernel may choose a new location for
92 * any object in the future.
93 *
94 * At the level of talking to the hardware, submitting a batchbuffer for the
95  * GPU to execute amounts to adding content to a buffer from which the HW
96 * command streamer is reading.
97 *
98 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
99 * Execlists, this command is not placed on the same buffer as the
100 * remaining items.
101 *
102 * 2. Add a command to invalidate caches to the buffer.
103 *
104 * 3. Add a batchbuffer start command to the buffer; the start command is
105 * essentially a token together with the GPU address of the batchbuffer
106 * to be executed.
107 *
108 * 4. Add a pipeline flush to the buffer.
109 *
110 * 5. Add a memory write command to the buffer to record when the GPU
111 * is done executing the batchbuffer. The memory write writes the
112 * global sequence number of the request, ``i915_request::global_seqno``;
113 * the i915 driver uses the current value in the register to determine
114 * if the GPU has completed the batchbuffer.
115 *
116 * 6. Add a user interrupt command to the buffer. This command instructs
117 * the GPU to issue an interrupt when the command, pipeline flush and
118 * memory write are completed.
119 *
120 * 7. Inform the hardware of the additional commands added to the buffer
121 * (by updating the tail pointer).
122 *
123 * Processing an execbuf ioctl is conceptually split up into a few phases.
124 *
125 * 1. Validation - Ensure all the pointers, handles and flags are valid.
126 * 2. Reservation - Assign GPU address space for every object
127 * 3. Relocation - Update any addresses to point to the final locations
128 * 4. Serialisation - Order the request with respect to its dependencies
129 * 5. Construction - Construct a request to execute the batchbuffer
130 * 6. Submission (at some point in the future execution)
131 *
132 * Reserving resources for the execbuf is the most complicated phase. We
133 * neither want to have to migrate the object in the address space, nor do
134 * we want to have to update any relocations pointing to this object. Ideally,
135 * we want to leave the object where it is and for all the existing relocations
136 * to match. If the object is given a new address, or if userspace thinks the
137 * object is elsewhere, we have to parse all the relocation entries and update
138  * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
139 * all the target addresses in all of its objects match the value in the
140 * relocation entries and that they all match the presumed offsets given by the
141 * list of execbuffer objects. Using this knowledge, we know that if we haven't
142 * moved any buffers, all the relocation entries are valid and we can skip
143 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
144  * hang.) The requirements for using I915_EXEC_NO_RELOC are:
145 *
146 * The addresses written in the objects must match the corresponding
147 * reloc.presumed_offset which in turn must match the corresponding
148 * execobject.offset.
149 *
150 * Any render targets written to in the batch must be flagged with
151 * EXEC_OBJECT_WRITE.
152 *
153 * To avoid stalling, execobject.offset should match the current
154 * address of that object within the active context.
155 *
156  * The reservation is done in multiple phases. First we try to keep any
157  * object already bound in its current location - so long as it meets the
158 * constraints imposed by the new execbuffer. Any object left unbound after the
159 * first pass is then fitted into any available idle space. If an object does
160 * not fit, all objects are removed from the reservation and the process rerun
161 * after sorting the objects into a priority order (more difficult to fit
162 * objects are tried first). Failing that, the entire VM is cleared and we try
163  * to fit the execbuf one last time before concluding that it simply will not
164 * fit.
165 *
166 * A small complication to all of this is that we allow userspace not only to
167 * specify an alignment and a size for the object in the address space, but
168  * we also allow userspace to specify the exact offset. These objects are
169  * simpler to place (the location is known a priori): all we have to do is make
170 * sure the space is available.
171 *
172 * Once all the objects are in place, patching up the buried pointers to point
173 * to the final locations is a fairly simple job of walking over the relocation
174 * entry arrays, looking up the right address and rewriting the value into
175 * the object. Simple! ... The relocation entries are stored in user memory
176 * and so to access them we have to copy them into a local buffer. That copy
177 * has to avoid taking any pagefaults as they may lead back to a GEM object
178 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
179 * the relocation into multiple passes. First we try to do everything within an
180 * atomic context (avoid the pagefaults) which requires that we never wait. If
181  * we detect that we may wait, or if we need to fault, then we have to fall back
182 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
183 * bells yet?) Dropping the mutex means that we lose all the state we have
184 * built up so far for the execbuf and we must reset any global data. However,
185 * we do leave the objects pinned in their final locations - which is a
186 * potential issue for concurrent execbufs. Once we have left the mutex, we can
187 * allocate and copy all the relocation entries into a large array at our
188 * leisure, reacquire the mutex, reclaim all the objects and other state and
189 * then proceed to update any incorrect addresses with the objects.
190 *
191 * As we process the relocation entries, we maintain a record of whether the
192  * object is being written to. Using NO_RELOC, we expect userspace to provide
193 * this information instead. We also check whether we can skip the relocation
194 * by comparing the expected value inside the relocation entry with the target's
195 * final address. If they differ, we have to map the current object and rewrite
196 * the 4 or 8 byte pointer within.
197 *
198 * Serialising an execbuf is quite simple according to the rules of the GEM
199 * ABI. Execution within each context is ordered by the order of submission.
200 * Writes to any GEM object are in order of submission and are exclusive. Reads
201 * from a GEM object are unordered with respect to other reads, but ordered by
202 * writes. A write submitted after a read cannot occur before the read, and
203 * similarly any read submitted after a write cannot occur before the write.
204 * Writes are ordered between engines such that only one write occurs at any
205 * time (completing any reads beforehand) - using semaphores where available
206  * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
207 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
208 * reads before starting, and any read (either using set-domain or pread) must
209  * flush all GPU writes before starting. (Note we only employ a barrier before;
210 * we currently rely on userspace not concurrently starting a new execution
211 * whilst reading or writing to an object. This may be an advantage or not
212 * depending on how much you trust userspace not to shoot themselves in the
213 * foot.) Serialisation may just result in the request being inserted into
214  * a DAG awaiting its turn, but the simplest approach is to wait on the CPU until
215 * all dependencies are resolved.
216 *
217  * After all of that, it is just a matter of closing the request and handing it to
218 * the hardware (well, leaving it in a queue to be executed). However, we also
219 * offer the ability for batchbuffers to be run with elevated privileges so
220 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
221  * Before any batch is given extra privileges we must first check that it
222  * contains no nefarious instructions: each instruction must come from
223  * our whitelist and all registers must also be from an allowed list. We first
224 * copy the user's batchbuffer to a shadow (so that the user doesn't have
225 * access to it, either by the CPU or GPU as we scan it) and then parse each
226 * instruction. If everything is ok, we set a flag telling the hardware to run
227 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
228 */
229
230 struct i915_execbuffer {
231 struct drm_i915_private *i915; /** i915 backpointer */
232 struct drm_file *file; /** per-file lookup tables and limits */
233 struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
234 struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
235 struct i915_vma **vma;
236 unsigned int *flags;
237
238 struct intel_engine_cs *engine; /** engine to queue the request to */
239 struct i915_gem_context *ctx; /** context for building the request */
240 struct i915_address_space *vm; /** GTT and vma for the request */
241
242 struct i915_request *request; /** our request to build */
243 struct i915_vma *batch; /** identity of the batch obj/vma */
244
245 /** actual size of execobj[] as we may extend it for the cmdparser */
246 unsigned int buffer_count;
247
248 /** list of vma not yet bound during reservation phase */
249 struct list_head unbound;
250
251 /** list of vma that have execobj.relocation_count */
252 struct list_head relocs;
253
254 /**
255 * Track the most recently used object for relocations, as we
256 * frequently have to perform multiple relocations within the same
257 * obj/page
258 */
259 struct reloc_cache {
260 struct drm_mm_node node; /** temporary GTT binding */
261 unsigned long vaddr; /** Current kmap address */
262 unsigned long page; /** Currently mapped page index */
263 unsigned int gen; /** Cached value of INTEL_GEN */
264 bool use_64bit_reloc : 1;
265 bool has_llc : 1;
266 bool has_fence : 1;
267 bool needs_unfenced : 1;
268
269 struct i915_request *rq;
270 u32 *rq_cmd;
271 unsigned int rq_size;
272 } reloc_cache;
273
274 u64 invalid_flags; /** Set of execobj.flags that are invalid */
275 u32 context_flags; /** Set of execobj.flags to insert from the ctx */
276
277 u32 batch_start_offset; /** Location within object of batch */
278 u32 batch_len; /** Length of batch within object */
279 u32 batch_flags; /** Flags composed for emit_bb_start() */
280
281 /**
282  * Indicate either the size of the hashtable used to resolve
283 * relocation handles, or if negative that we are using a direct
284 * index into the execobj[].
285 */
286 int lut_size;
287 struct hlist_head *buckets; /** ht for relocation handles */
288 };
289
290 #define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
291
292 /*
293 * Used to convert any address to canonical form.
294 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
295 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
296 * addresses to be in a canonical form:
297 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
298 * canonical form [63:48] == [47]."
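 * For example, an address with bit 47 set, such as 0x0000800000000000,
 * is sign-extended to 0xffff800000000000.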
299 */
300 #define GEN8_HIGH_ADDRESS_BIT 47
301 static inline u64 gen8_canonical_addr(u64 address)
302 {
303 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
304 }
305
306 static inline u64 gen8_noncanonical_addr(u64 address)
307 {
308 return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
309 }
310
311 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
312 {
313 return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
314 }
315
316 static int eb_create(struct i915_execbuffer *eb)
317 {
318 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
319 unsigned int size = 1 + ilog2(eb->buffer_count);
320
321 /*
322 * Without a 1:1 association between relocation handles and
323 * the execobject[] index, we instead create a hashtable.
324 * We size it dynamically based on available memory, starting
325  * first with a 1:1 associative hash and scaling back until
326 * the allocation succeeds.
327 *
328 * Later on we use a positive lut_size to indicate we are
329 * using this hashtable, and a negative value to indicate a
330 * direct lookup.
331 */
332 do {
333 gfp_t flags;
334
335 /* While we can still reduce the allocation size, don't
336 * raise a warning and allow the allocation to fail.
337 * On the last pass though, we want to try as hard
338 * as possible to perform the allocation and warn
339 * if it fails.
340 */
341 flags = GFP_KERNEL;
342 if (size > 1)
343 flags |= __GFP_NORETRY | __GFP_NOWARN;
344
345 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
346 flags);
347 if (eb->buckets)
348 break;
349 } while (--size);
350
351 if (unlikely(!size))
352 return -ENOMEM;
353
354 eb->lut_size = size;
355 } else {
356 eb->lut_size = -eb->buffer_count;
357 }
358
359 return 0;
360 }
361
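/*
 * Check whether a vma's current binding violates the constraints carried in
 * its execobject entry: pad_to_size, alignment, a pinned (fixed) offset, the
 * batch bias, the 4GiB zone for objects without 48b support, and the need
 * for a mappable/fenceable GGTT binding.
 */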
362 static bool
363 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
364 const struct i915_vma *vma,
365 unsigned int flags)
366 {
367 if (vma->node.size < entry->pad_to_size)
368 return true;
369
370 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
371 return true;
372
373 if (flags & EXEC_OBJECT_PINNED &&
374 vma->node.start != entry->offset)
375 return true;
376
377 if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
378 vma->node.start < BATCH_OFFSET_BIAS)
379 return true;
380
381 if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
382 (vma->node.start + vma->node.size - 1) >> 32)
383 return true;
384
385 if (flags & __EXEC_OBJECT_NEEDS_MAP &&
386 !i915_vma_is_map_and_fenceable(vma))
387 return true;
388
389 return false;
390 }
391
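/*
 * Try to pin the vma at its current (or user-requested) address without
 * evicting anything, taking a fence register if the execobject needs one.
 * Returns false if the pin fails or the resulting placement does not satisfy
 * the execobject constraints, in which case the caller falls back to the
 * full reservation path.
 */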
392 static inline bool
393 eb_pin_vma(struct i915_execbuffer *eb,
394 const struct drm_i915_gem_exec_object2 *entry,
395 struct i915_vma *vma)
396 {
397 unsigned int exec_flags = *vma->exec_flags;
398 u64 pin_flags;
399
400 if (vma->node.size)
401 pin_flags = vma->node.start;
402 else
403 pin_flags = entry->offset & PIN_OFFSET_MASK;
404
405 pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
406 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
407 pin_flags |= PIN_GLOBAL;
408
409 if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
410 return false;
411
412 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
413 if (unlikely(i915_vma_pin_fence(vma))) {
414 i915_vma_unpin(vma);
415 return false;
416 }
417
418 if (vma->fence)
419 exec_flags |= __EXEC_OBJECT_HAS_FENCE;
420 }
421
422 *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
423 return !eb_vma_misplaced(entry, vma, exec_flags);
424 }
425
426 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
427 {
428 GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
429
430 if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
431 __i915_vma_unpin_fence(vma);
432
433 __i915_vma_unpin(vma);
434 }
435
436 static inline void
437 eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
438 {
439 if (!(*flags & __EXEC_OBJECT_HAS_PIN))
440 return;
441
442 __eb_unreserve_vma(vma, *flags);
443 *flags &= ~__EXEC_OBJECT_RESERVED;
444 }
445
446 static int
447 eb_validate_vma(struct i915_execbuffer *eb,
448 struct drm_i915_gem_exec_object2 *entry,
449 struct i915_vma *vma)
450 {
451 if (unlikely(entry->flags & eb->invalid_flags))
452 return -EINVAL;
453
454 if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
455 return -EINVAL;
456
457 /*
458 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
459 * any non-page-aligned or non-canonical addresses.
460 */
461 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
462 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
463 return -EINVAL;
464
465 /* pad_to_size was once a reserved field, so sanitize it */
466 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
467 if (unlikely(offset_in_page(entry->pad_to_size)))
468 return -EINVAL;
469 } else {
470 entry->pad_to_size = 0;
471 }
472
473 if (unlikely(vma->exec_flags)) {
474 DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
475 entry->handle, (int)(entry - eb->exec));
476 return -EINVAL;
477 }
478
479 /*
480  * From the drm_mm perspective the address space is continuous,
481  * so from this point on we always use the non-canonical
482  * form internally.
483 */
484 entry->offset = gen8_noncanonical_addr(entry->offset);
485
486 if (!eb->reloc_cache.has_fence) {
487 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
488 } else {
489 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
490 eb->reloc_cache.needs_unfenced) &&
491 i915_gem_object_is_tiled(vma->obj))
492 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
493 }
494
495 if (!(entry->flags & EXEC_OBJECT_PINNED))
496 entry->flags |= eb->context_flags;
497
498 return 0;
499 }
500
501 static int
502 eb_add_vma(struct i915_execbuffer *eb,
503 unsigned int i, unsigned batch_idx,
504 struct i915_vma *vma)
505 {
506 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
507 int err;
508
509 GEM_BUG_ON(i915_vma_is_closed(vma));
510
511 if (!(eb->args->flags & __EXEC_VALIDATED)) {
512 err = eb_validate_vma(eb, entry, vma);
513 if (unlikely(err))
514 return err;
515 }
516
517 if (eb->lut_size > 0) {
518 vma->exec_handle = entry->handle;
519 hlist_add_head(&vma->exec_node,
520 &eb->buckets[hash_32(entry->handle,
521 eb->lut_size)]);
522 }
523
524 if (entry->relocation_count)
525 list_add_tail(&vma->reloc_link, &eb->relocs);
526
527 /*
528 * Stash a pointer from the vma to execobj, so we can query its flags,
529 * size, alignment etc as provided by the user. Also we stash a pointer
530 * to the vma inside the execobj so that we can use a direct lookup
531 * to find the right target VMA when doing relocations.
532 */
533 eb->vma[i] = vma;
534 eb->flags[i] = entry->flags;
535 vma->exec_flags = &eb->flags[i];
536
537 /*
538 * SNA is doing fancy tricks with compressing batch buffers, which leads
539 * to negative relocation deltas. Usually that works out ok since the
540 * relocate address is still positive, except when the batch is placed
541 * very low in the GTT. Ensure this doesn't happen.
542 *
543 * Note that actual hangs have only been observed on gen7, but for
544 * paranoia do it everywhere.
545 */
546 if (i == batch_idx) {
547 if (entry->relocation_count &&
548 !(eb->flags[i] & EXEC_OBJECT_PINNED))
549 eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
550 if (eb->reloc_cache.has_fence)
551 eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
552
553 eb->batch = vma;
554 }
555
556 err = 0;
557 if (eb_pin_vma(eb, entry, vma)) {
558 if (entry->offset != vma->node.start) {
559 entry->offset = vma->node.start | UPDATE;
560 eb->args->flags |= __EXEC_HAS_RELOC;
561 }
562 } else {
563 eb_unreserve_vma(vma, vma->exec_flags);
564
565 list_add_tail(&vma->exec_link, &eb->unbound);
566 if (drm_mm_node_allocated(&vma->node))
567 err = i915_vma_unbind(vma);
568 if (unlikely(err))
569 vma->exec_flags = NULL;
570 }
571 return err;
572 }
573
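/*
 * CPU relocations require an object backed by struct pages; prefer them when
 * CPU writes are coherent with the GPU (LLC) or the object is already
 * CPU-dirty or snooped, otherwise write the relocation through the GTT.
 */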
574 static inline int use_cpu_reloc(const struct reloc_cache *cache,
575 const struct drm_i915_gem_object *obj)
576 {
577 if (!i915_gem_object_has_struct_page(obj))
578 return false;
579
580 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
581 return true;
582
583 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
584 return false;
585
586 return (cache->has_llc ||
587 obj->cache_dirty ||
588 obj->cache_level != I915_CACHE_NONE);
589 }
590
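/*
 * Bind a vma that could not be pinned in place: translate the execobject
 * flags into pin flags (4G zone, mappable, fixed offset or batch bias),
 * pin it, record any new offset back into the execobject and grab a fence
 * if required.
 */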
591 static int eb_reserve_vma(const struct i915_execbuffer *eb,
592 struct i915_vma *vma)
593 {
594 struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
595 unsigned int exec_flags = *vma->exec_flags;
596 u64 pin_flags;
597 int err;
598
599 pin_flags = PIN_USER | PIN_NONBLOCK;
600 if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
601 pin_flags |= PIN_GLOBAL;
602
603 /*
604 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
605 * limit address to the first 4GBs for unflagged objects.
606 */
607 if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
608 pin_flags |= PIN_ZONE_4G;
609
610 if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
611 pin_flags |= PIN_MAPPABLE;
612
613 if (exec_flags & EXEC_OBJECT_PINNED) {
614 pin_flags |= entry->offset | PIN_OFFSET_FIXED;
615 pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
616 } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
617 pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
618 }
619
620 err = i915_vma_pin(vma,
621 entry->pad_to_size, entry->alignment,
622 pin_flags);
623 if (err)
624 return err;
625
626 if (entry->offset != vma->node.start) {
627 entry->offset = vma->node.start | UPDATE;
628 eb->args->flags |= __EXEC_HAS_RELOC;
629 }
630
631 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
632 err = i915_vma_pin_fence(vma);
633 if (unlikely(err)) {
634 i915_vma_unpin(vma);
635 return err;
636 }
637
638 if (vma->fence)
639 exec_flags |= __EXEC_OBJECT_HAS_FENCE;
640 }
641
642 *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
643 GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
644
645 return 0;
646 }
647
648 static int eb_reserve(struct i915_execbuffer *eb)
649 {
650 const unsigned int count = eb->buffer_count;
651 struct list_head last;
652 struct i915_vma *vma;
653 unsigned int i, pass;
654 int err;
655
656 /*
657 * Attempt to pin all of the buffers into the GTT.
658 * This is done in 3 phases:
659 *
660 * 1a. Unbind all objects that do not match the GTT constraints for
661 * the execbuffer (fenceable, mappable, alignment etc).
662 * 1b. Increment pin count for already bound objects.
663 * 2. Bind new objects.
664 * 3. Decrement pin count.
665 *
666  * This avoids unnecessary unbinding of later objects in order to make
667 * room for the earlier objects *unless* we need to defragment.
668 */
669
670 pass = 0;
671 err = 0;
672 do {
673 list_for_each_entry(vma, &eb->unbound, exec_link) {
674 err = eb_reserve_vma(eb, vma);
675 if (err)
676 break;
677 }
678 if (err != -ENOSPC)
679 return err;
680
681 /* Resort *all* the objects into priority order */
682 INIT_LIST_HEAD(&eb->unbound);
683 INIT_LIST_HEAD(&last);
684 for (i = 0; i < count; i++) {
685 unsigned int flags = eb->flags[i];
686 struct i915_vma *vma = eb->vma[i];
687
688 if (flags & EXEC_OBJECT_PINNED &&
689 flags & __EXEC_OBJECT_HAS_PIN)
690 continue;
691
692 eb_unreserve_vma(vma, &eb->flags[i]);
693
694 if (flags & EXEC_OBJECT_PINNED)
695 /* Pinned must have their slot */
696 list_add(&vma->exec_link, &eb->unbound);
697 else if (flags & __EXEC_OBJECT_NEEDS_MAP)
698  /* Mappable objects require the lowest 256MiB (aperture) */
699 list_add_tail(&vma->exec_link, &eb->unbound);
700 else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
701 /* Prioritise 4GiB region for restricted bo */
702 list_add(&vma->exec_link, &last);
703 else
704 list_add_tail(&vma->exec_link, &last);
705 }
706 list_splice_tail(&last, &eb->unbound);
707
708 switch (pass++) {
709 case 0:
710 break;
711
712 case 1:
713 /* Too fragmented, unbind everything and retry */
714 err = i915_gem_evict_vm(eb->vm);
715 if (err)
716 return err;
717 break;
718
719 default:
720 return -ENOSPC;
721 }
722 } while (1);
723 }
724
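/* The batch is the last execobject unless I915_EXEC_BATCH_FIRST is set. */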
725 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
726 {
727 if (eb->args->flags & I915_EXEC_BATCH_FIRST)
728 return 0;
729 else
730 return eb->buffer_count - 1;
731 }
732
733 static int eb_select_context(struct i915_execbuffer *eb)
734 {
735 struct i915_gem_context *ctx;
736
737 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
738 if (unlikely(!ctx))
739 return -ENOENT;
740
741 eb->ctx = ctx;
742 if (ctx->ppgtt) {
743 eb->vm = &ctx->ppgtt->vm;
744 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
745 } else {
746 eb->vm = &eb->i915->ggtt.vm;
747 }
748
749 eb->context_flags = 0;
750 if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
751 eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
752
753 return 0;
754 }
755
756 static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
757 {
758 struct i915_request *rq;
759
760 /*
761 * Completely unscientific finger-in-the-air estimates for suitable
762 * maximum user request size (to avoid blocking) and then backoff.
763 */
764 if (intel_ring_update_space(ring) >= PAGE_SIZE)
765 return NULL;
766
767 /*
768 * Find a request that after waiting upon, there will be at least half
769 * the ring available. The hysteresis allows us to compete for the
770 * shared ring and should mean that we sleep less often prior to
771 * claiming our resources, but not so long that the ring completely
772 * drains before we can submit our next request.
773 */
774 list_for_each_entry(rq, &ring->request_list, ring_link) {
775 if (__intel_ring_space(rq->postfix,
776 ring->emit, ring->size) > ring->size / 2)
777 break;
778 }
779 if (&rq->ring_link == &ring->request_list)
780 return NULL; /* weird, we will check again later for real */
781
782 return i915_request_get(rq);
783 }
784
785 static int eb_wait_for_ring(const struct i915_execbuffer *eb)
786 {
787 const struct intel_context *ce;
788 struct i915_request *rq;
789 int ret = 0;
790
791 /*
792 * Apply a light amount of backpressure to prevent excessive hogs
793 * from blocking waiting for space whilst holding struct_mutex and
794 * keeping all of their resources pinned.
795 */
796
797 ce = intel_context_lookup(eb->ctx, eb->engine);
798 if (!ce || !ce->ring) /* first use, assume empty! */
799 return 0;
800
801 rq = __eb_wait_for_ring(ce->ring);
802 if (rq) {
803 mutex_unlock(&eb->i915->drm.struct_mutex);
804
805 if (i915_request_wait(rq,
806 I915_WAIT_INTERRUPTIBLE,
807 MAX_SCHEDULE_TIMEOUT) < 0)
808 ret = -EINTR;
809
810 i915_request_put(rq);
811
812 mutex_lock(&eb->i915->drm.struct_mutex);
813 }
814
815 return ret;
816 }
817
818 static int eb_lookup_vmas(struct i915_execbuffer *eb)
819 {
820 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
821 struct drm_i915_gem_object *obj;
822 unsigned int i, batch;
823 int err;
824
825 if (unlikely(i915_gem_context_is_closed(eb->ctx)))
826 return -ENOENT;
827
828 if (unlikely(i915_gem_context_is_banned(eb->ctx)))
829 return -EIO;
830
831 INIT_LIST_HEAD(&eb->relocs);
832 INIT_LIST_HEAD(&eb->unbound);
833
834 batch = eb_batch_index(eb);
835
836 for (i = 0; i < eb->buffer_count; i++) {
837 u32 handle = eb->exec[i].handle;
838 struct i915_lut_handle *lut;
839 struct i915_vma *vma;
840
841 vma = radix_tree_lookup(handles_vma, handle);
842 if (likely(vma))
843 goto add_vma;
844
845 obj = i915_gem_object_lookup(eb->file, handle);
846 if (unlikely(!obj)) {
847 err = -ENOENT;
848 goto err_vma;
849 }
850
851 vma = i915_vma_instance(obj, eb->vm, NULL);
852 if (IS_ERR(vma)) {
853 err = PTR_ERR(vma);
854 goto err_obj;
855 }
856
857 lut = i915_lut_handle_alloc();
858 if (unlikely(!lut)) {
859 err = -ENOMEM;
860 goto err_obj;
861 }
862
863 err = radix_tree_insert(handles_vma, handle, vma);
864 if (unlikely(err)) {
865 i915_lut_handle_free(lut);
866 goto err_obj;
867 }
868
869 /* transfer ref to ctx */
870 if (!vma->open_count++)
871 i915_vma_reopen(vma);
872 list_add(&lut->obj_link, &obj->lut_list);
873 list_add(&lut->ctx_link, &eb->ctx->handles_list);
874 lut->ctx = eb->ctx;
875 lut->handle = handle;
876
877 add_vma:
878 err = eb_add_vma(eb, i, batch, vma);
879 if (unlikely(err))
880 goto err_vma;
881
882 GEM_BUG_ON(vma != eb->vma[i]);
883 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
884 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
885 eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
886 }
887
888 eb->args->flags |= __EXEC_VALIDATED;
889 return eb_reserve(eb);
890
891 err_obj:
892 i915_gem_object_put(obj);
893 err_vma:
894 eb->vma[i] = NULL;
895 return err;
896 }
897
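/*
 * Look up a vma by its userspace handle: with I915_EXEC_HANDLE_LUT the
 * handle is a direct index into execobj[] (negative lut_size), otherwise
 * search the hashtable populated by eb_add_vma().
 */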
898 static struct i915_vma *
899 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
900 {
901 if (eb->lut_size < 0) {
902 if (handle >= -eb->lut_size)
903 return NULL;
904 return eb->vma[handle];
905 } else {
906 struct hlist_head *head;
907 struct i915_vma *vma;
908
909 head = &eb->buckets[hash_32(handle, eb->lut_size)];
910 hlist_for_each_entry(vma, head, exec_node) {
911 if (vma->exec_handle == handle)
912 return vma;
913 }
914 return NULL;
915 }
916 }
917
918 static void eb_release_vmas(const struct i915_execbuffer *eb)
919 {
920 const unsigned int count = eb->buffer_count;
921 unsigned int i;
922
923 for (i = 0; i < count; i++) {
924 struct i915_vma *vma = eb->vma[i];
925 unsigned int flags = eb->flags[i];
926
927 if (!vma)
928 break;
929
930 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
931 vma->exec_flags = NULL;
932 eb->vma[i] = NULL;
933
934 if (flags & __EXEC_OBJECT_HAS_PIN)
935 __eb_unreserve_vma(vma, flags);
936
937 if (flags & __EXEC_OBJECT_HAS_REF)
938 i915_vma_put(vma);
939 }
940 }
941
942 static void eb_reset_vmas(const struct i915_execbuffer *eb)
943 {
944 eb_release_vmas(eb);
945 if (eb->lut_size > 0)
946 memset(eb->buckets, 0,
947 sizeof(struct hlist_head) << eb->lut_size);
948 }
949
950 static void eb_destroy(const struct i915_execbuffer *eb)
951 {
952 GEM_BUG_ON(eb->reloc_cache.rq);
953
954 if (eb->lut_size > 0)
955 kfree(eb->buckets);
956 }
957
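/*
 * Target address of a relocation: target->node.start plus the signed delta,
 * converted to canonical form.
 */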
958 static inline u64
959 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
960 const struct i915_vma *target)
961 {
962 return gen8_canonical_addr((int)reloc->delta + target->node.start);
963 }
964
965 static void reloc_cache_init(struct reloc_cache *cache,
966 struct drm_i915_private *i915)
967 {
968 cache->page = -1;
969 cache->vaddr = 0;
970 /* Must be a variable in the struct to allow GCC to unroll. */
971 cache->gen = INTEL_GEN(i915);
972 cache->has_llc = HAS_LLC(i915);
973 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
974 cache->has_fence = cache->gen < 4;
975 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
976 cache->node.allocated = false;
977 cache->rq = NULL;
978 cache->rq_size = 0;
979 }
980
981 static inline void *unmask_page(unsigned long p)
982 {
983 return (void *)(uintptr_t)(p & PAGE_MASK);
984 }
985
986 static inline unsigned int unmask_flags(unsigned long p)
987 {
988 return p & ~PAGE_MASK;
989 }
990
991 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
992
993 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
994 {
995 struct drm_i915_private *i915 =
996 container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
997 return &i915->ggtt;
998 }
999
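/*
 * Close the GPU relocation batch: terminate it with MI_BATCH_BUFFER_END,
 * flush the CPU mapping and submit the request.
 */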
1000 static void reloc_gpu_flush(struct reloc_cache *cache)
1001 {
1002 GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
1003 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
1004
1005 __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
1006 i915_gem_object_unpin_map(cache->rq->batch->obj);
1007
1008 i915_gem_chipset_flush(cache->rq->i915);
1009
1010 i915_request_add(cache->rq);
1011 cache->rq = NULL;
1012 }
1013
1014 static void reloc_cache_reset(struct reloc_cache *cache)
1015 {
1016 void *vaddr;
1017
1018 if (cache->rq)
1019 reloc_gpu_flush(cache);
1020
1021 if (!cache->vaddr)
1022 return;
1023
1024 vaddr = unmask_page(cache->vaddr);
1025 if (cache->vaddr & KMAP) {
1026 if (cache->vaddr & CLFLUSH_AFTER)
1027 mb();
1028
1029 kunmap_atomic(vaddr);
1030 i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
1031 } else {
1032 wmb();
1033 io_mapping_unmap_atomic((void __iomem *)vaddr);
1034 if (cache->node.allocated) {
1035 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1036
1037 ggtt->vm.clear_range(&ggtt->vm,
1038 cache->node.start,
1039 cache->node.size);
1040 drm_mm_remove_node(&cache->node);
1041 } else {
1042 i915_vma_unpin((struct i915_vma *)cache->node.mm);
1043 }
1044 }
1045
1046 cache->vaddr = 0;
1047 cache->page = -1;
1048 }
1049
1050 static void *reloc_kmap(struct drm_i915_gem_object *obj,
1051 struct reloc_cache *cache,
1052 unsigned long page)
1053 {
1054 void *vaddr;
1055
1056 if (cache->vaddr) {
1057 kunmap_atomic(unmask_page(cache->vaddr));
1058 } else {
1059 unsigned int flushes;
1060 int err;
1061
1062 err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
1063 if (err)
1064 return ERR_PTR(err);
1065
1066 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
1067 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
1068
1069 cache->vaddr = flushes | KMAP;
1070 cache->node.mm = (void *)obj;
1071 if (flushes)
1072 mb();
1073 }
1074
1075 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
1076 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
1077 cache->page = page;
1078
1079 return vaddr;
1080 }
1081
1082 static void *reloc_iomap(struct drm_i915_gem_object *obj,
1083 struct reloc_cache *cache,
1084 unsigned long page)
1085 {
1086 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1087 unsigned long offset;
1088 void *vaddr;
1089
1090 if (cache->vaddr) {
1091 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
1092 } else {
1093 struct i915_vma *vma;
1094 int err;
1095
1096 if (use_cpu_reloc(cache, obj))
1097 return NULL;
1098
1099 err = i915_gem_object_set_to_gtt_domain(obj, true);
1100 if (err)
1101 return ERR_PTR(err);
1102
1103 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1104 PIN_MAPPABLE |
1105 PIN_NONBLOCK |
1106 PIN_NONFAULT);
1107 if (IS_ERR(vma)) {
1108 memset(&cache->node, 0, sizeof(cache->node));
1109 err = drm_mm_insert_node_in_range
1110 (&ggtt->vm.mm, &cache->node,
1111 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1112 0, ggtt->mappable_end,
1113 DRM_MM_INSERT_LOW);
1114 if (err) /* no inactive aperture space, use cpu reloc */
1115 return NULL;
1116 } else {
1117 err = i915_vma_put_fence(vma);
1118 if (err) {
1119 i915_vma_unpin(vma);
1120 return ERR_PTR(err);
1121 }
1122
1123 cache->node.start = vma->node.start;
1124 cache->node.mm = (void *)vma;
1125 }
1126 }
1127
1128 offset = cache->node.start;
1129 if (cache->node.allocated) {
1130 wmb();
1131 ggtt->vm.insert_page(&ggtt->vm,
1132 i915_gem_object_get_dma_address(obj, page),
1133 offset, I915_CACHE_NONE, 0);
1134 } else {
1135 offset += page << PAGE_SHIFT;
1136 }
1137
1138 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
1139 offset);
1140 cache->page = page;
1141 cache->vaddr = (unsigned long)vaddr;
1142
1143 return vaddr;
1144 }
1145
1146 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
1147 struct reloc_cache *cache,
1148 unsigned long page)
1149 {
1150 void *vaddr;
1151
1152 if (cache->page == page) {
1153 vaddr = unmask_page(cache->vaddr);
1154 } else {
1155 vaddr = NULL;
1156 if ((cache->vaddr & KMAP) == 0)
1157 vaddr = reloc_iomap(obj, cache, page);
1158 if (!vaddr)
1159 vaddr = reloc_kmap(obj, cache, page);
1160 }
1161
1162 return vaddr;
1163 }
1164
1165 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1166 {
1167 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
1168 if (flushes & CLFLUSH_BEFORE) {
1169 clflushopt(addr);
1170 mb();
1171 }
1172
1173 *addr = value;
1174
1175 /*
1176 * Writes to the same cacheline are serialised by the CPU
1177 * (including clflush). On the write path, we only require
1178 * that it hits memory in an orderly fashion and place
1179 * mb barriers at the start and end of the relocation phase
1180 * to ensure ordering of clflush wrt to the system.
1181 */
1182 if (flushes & CLFLUSH_AFTER)
1183 clflushopt(addr);
1184 } else
1185 *addr = value;
1186 }
1187
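/*
 * Set up a request that performs relocations on the GPU: grab a page-sized
 * buffer from the engine's batch pool, map it for the CPU to fill with
 * commands, and leave the mapping pinned in cache->rq_cmd for reloc_gpu()
 * to append to.
 */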
1188 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1189 struct i915_vma *vma,
1190 unsigned int len)
1191 {
1192 struct reloc_cache *cache = &eb->reloc_cache;
1193 struct drm_i915_gem_object *obj;
1194 struct i915_request *rq;
1195 struct i915_vma *batch;
1196 u32 *cmd;
1197 int err;
1198
1199 if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
1200 obj = vma->obj;
1201 if (obj->cache_dirty & ~obj->cache_coherent)
1202 i915_gem_clflush_object(obj, 0);
1203 obj->write_domain = 0;
1204 }
1205
1206 GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);
1207
1208 obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
1209 if (IS_ERR(obj))
1210 return PTR_ERR(obj);
1211
1212 cmd = i915_gem_object_pin_map(obj,
1213 cache->has_llc ?
1214 I915_MAP_FORCE_WB :
1215 I915_MAP_FORCE_WC);
1216 i915_gem_object_unpin_pages(obj);
1217 if (IS_ERR(cmd))
1218 return PTR_ERR(cmd);
1219
1220 batch = i915_vma_instance(obj, vma->vm, NULL);
1221 if (IS_ERR(batch)) {
1222 err = PTR_ERR(batch);
1223 goto err_unmap;
1224 }
1225
1226 err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1227 if (err)
1228 goto err_unmap;
1229
1230 rq = i915_request_alloc(eb->engine, eb->ctx);
1231 if (IS_ERR(rq)) {
1232 err = PTR_ERR(rq);
1233 goto err_unpin;
1234 }
1235
1236 err = i915_request_await_object(rq, vma->obj, true);
1237 if (err)
1238 goto err_request;
1239
1240 err = eb->engine->emit_bb_start(rq,
1241 batch->node.start, PAGE_SIZE,
1242 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1243 if (err)
1244 goto err_request;
1245
1246 GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
1247 err = i915_vma_move_to_active(batch, rq, 0);
1248 if (err)
1249 goto skip_request;
1250
1251 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1252 if (err)
1253 goto skip_request;
1254
1255 rq->batch = batch;
1256 i915_vma_unpin(batch);
1257
1258 cache->rq = rq;
1259 cache->rq_cmd = cmd;
1260 cache->rq_size = 0;
1261
1262 /* Return with batch mapping (cmd) still pinned */
1263 return 0;
1264
1265 skip_request:
1266 i915_request_skip(rq, err);
1267 err_request:
1268 i915_request_add(rq);
1269 err_unpin:
1270 i915_vma_unpin(batch);
1271 err_unmap:
1272 i915_gem_object_unpin_map(obj);
1273 return err;
1274 }
1275
1276 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1277 struct i915_vma *vma,
1278 unsigned int len)
1279 {
1280 struct reloc_cache *cache = &eb->reloc_cache;
1281 u32 *cmd;
1282
1283 if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1284 reloc_gpu_flush(cache);
1285
1286 if (unlikely(!cache->rq)) {
1287 int err;
1288
1289 /* If we need to copy for the cmdparser, we will stall anyway */
1290 if (eb_use_cmdparser(eb))
1291 return ERR_PTR(-EWOULDBLOCK);
1292
1293 if (!intel_engine_can_store_dword(eb->engine))
1294 return ERR_PTR(-ENODEV);
1295
1296 err = __reloc_gpu_alloc(eb, vma, len);
1297 if (unlikely(err))
1298 return ERR_PTR(err);
1299 }
1300
1301 cmd = cache->rq_cmd + cache->rq_size;
1302 cache->rq_size += len;
1303
1304 return cmd;
1305 }
1306
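/*
 * Write the relocated address into the object. If no CPU mapping is cached
 * and the object is still busy on the GPU (or GPU relocations are forced),
 * emit MI_STORE_DWORD_IMM via reloc_gpu(); otherwise write through a CPU or
 * GTT mapping, splitting 64-bit relocations into two 32-bit writes.
 */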
1307 static u64
1308 relocate_entry(struct i915_vma *vma,
1309 const struct drm_i915_gem_relocation_entry *reloc,
1310 struct i915_execbuffer *eb,
1311 const struct i915_vma *target)
1312 {
1313 u64 offset = reloc->offset;
1314 u64 target_offset = relocation_target(reloc, target);
1315 bool wide = eb->reloc_cache.use_64bit_reloc;
1316 void *vaddr;
1317
1318 if (!eb->reloc_cache.vaddr &&
1319 (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1320 !reservation_object_test_signaled_rcu(vma->resv, true))) {
1321 const unsigned int gen = eb->reloc_cache.gen;
1322 unsigned int len;
1323 u32 *batch;
1324 u64 addr;
1325
1326 if (wide)
1327 len = offset & 7 ? 8 : 5;
1328 else if (gen >= 4)
1329 len = 4;
1330 else
1331 len = 3;
1332
1333 batch = reloc_gpu(eb, vma, len);
1334 if (IS_ERR(batch))
1335 goto repeat;
1336
1337 addr = gen8_canonical_addr(vma->node.start + offset);
1338 if (wide) {
1339 if (offset & 7) {
1340 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1341 *batch++ = lower_32_bits(addr);
1342 *batch++ = upper_32_bits(addr);
1343 *batch++ = lower_32_bits(target_offset);
1344
1345 addr = gen8_canonical_addr(addr + 4);
1346
1347 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1348 *batch++ = lower_32_bits(addr);
1349 *batch++ = upper_32_bits(addr);
1350 *batch++ = upper_32_bits(target_offset);
1351 } else {
1352 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1353 *batch++ = lower_32_bits(addr);
1354 *batch++ = upper_32_bits(addr);
1355 *batch++ = lower_32_bits(target_offset);
1356 *batch++ = upper_32_bits(target_offset);
1357 }
1358 } else if (gen >= 6) {
1359 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1360 *batch++ = 0;
1361 *batch++ = addr;
1362 *batch++ = target_offset;
1363 } else if (gen >= 4) {
1364 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1365 *batch++ = 0;
1366 *batch++ = addr;
1367 *batch++ = target_offset;
1368 } else {
1369 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1370 *batch++ = addr;
1371 *batch++ = target_offset;
1372 }
1373
1374 goto out;
1375 }
1376
1377 repeat:
1378 vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
1379 if (IS_ERR(vaddr))
1380 return PTR_ERR(vaddr);
1381
1382 clflush_write32(vaddr + offset_in_page(offset),
1383 lower_32_bits(target_offset),
1384 eb->reloc_cache.vaddr);
1385
1386 if (wide) {
1387 offset += sizeof(u32);
1388 target_offset >>= 32;
1389 wide = false;
1390 goto repeat;
1391 }
1392
1393 out:
1394 return target->node.start | UPDATE;
1395 }
1396
1397 static u64
1398 eb_relocate_entry(struct i915_execbuffer *eb,
1399 struct i915_vma *vma,
1400 const struct drm_i915_gem_relocation_entry *reloc)
1401 {
1402 struct i915_vma *target;
1403 int err;
1404
1405  /* we already hold a reference to all valid objects */
1406 target = eb_get_vma(eb, reloc->target_handle);
1407 if (unlikely(!target))
1408 return -ENOENT;
1409
1410 /* Validate that the target is in a valid r/w GPU domain */
1411 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1412 DRM_DEBUG("reloc with multiple write domains: "
1413 "target %d offset %d "
1414 "read %08x write %08x",
1415 reloc->target_handle,
1416 (int) reloc->offset,
1417 reloc->read_domains,
1418 reloc->write_domain);
1419 return -EINVAL;
1420 }
1421 if (unlikely((reloc->write_domain | reloc->read_domains)
1422 & ~I915_GEM_GPU_DOMAINS)) {
1423 DRM_DEBUG("reloc with read/write non-GPU domains: "
1424 "target %d offset %d "
1425 "read %08x write %08x",
1426 reloc->target_handle,
1427 (int) reloc->offset,
1428 reloc->read_domains,
1429 reloc->write_domain);
1430 return -EINVAL;
1431 }
1432
1433 if (reloc->write_domain) {
1434 *target->exec_flags |= EXEC_OBJECT_WRITE;
1435
1436 /*
1437 * Sandybridge PPGTT errata: We need a global gtt mapping
1438 * for MI and pipe_control writes because the gpu doesn't
1439 * properly redirect them through the ppgtt for non_secure
1440 * batchbuffers.
1441 */
1442 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1443 IS_GEN(eb->i915, 6)) {
1444 err = i915_vma_bind(target, target->obj->cache_level,
1445 PIN_GLOBAL);
1446 if (WARN_ONCE(err,
1447 "Unexpected failure to bind target VMA!"))
1448 return err;
1449 }
1450 }
1451
1452 /*
1453 * If the relocation already has the right value in it, no
1454 * more work needs to be done.
1455 */
1456 if (!DBG_FORCE_RELOC &&
1457 gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
1458 return 0;
1459
1460 /* Check that the relocation address is valid... */
1461 if (unlikely(reloc->offset >
1462 vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1463 DRM_DEBUG("Relocation beyond object bounds: "
1464 "target %d offset %d size %d.\n",
1465 reloc->target_handle,
1466 (int)reloc->offset,
1467 (int)vma->size);
1468 return -EINVAL;
1469 }
1470 if (unlikely(reloc->offset & 3)) {
1471 DRM_DEBUG("Relocation not 4-byte aligned: "
1472 "target %d offset %d.\n",
1473 reloc->target_handle,
1474 (int)reloc->offset);
1475 return -EINVAL;
1476 }
1477
1478 /*
1479 * If we write into the object, we need to force the synchronisation
1480 * barrier, either with an asynchronous clflush or if we executed the
1481 * patching using the GPU (though that should be serialised by the
1482 * timeline). To be completely sure, and since we are required to
1483 * do relocations we are already stalling, disable the user's opt
1484 * out of our synchronisation.
1485 */
1486 *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
1487
1488 /* and update the user's relocation entry */
1489 return relocate_entry(vma, reloc, eb, target);
1490 }
1491
1492 static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
1493 {
1494 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1495 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1496 struct drm_i915_gem_relocation_entry __user *urelocs;
1497 const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1498 unsigned int remain;
1499
1500 urelocs = u64_to_user_ptr(entry->relocs_ptr);
1501 remain = entry->relocation_count;
1502 if (unlikely(remain > N_RELOC(ULONG_MAX)))
1503 return -EINVAL;
1504
1505 /*
1506 * We must check that the entire relocation array is safe
1507 * to read. However, if the array is not writable the user loses
1508 * the updated relocation values.
1509 */
1510 if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
1511 return -EFAULT;
1512
1513 do {
1514 struct drm_i915_gem_relocation_entry *r = stack;
1515 unsigned int count =
1516 min_t(unsigned int, remain, ARRAY_SIZE(stack));
1517 unsigned int copied;
1518
1519 /*
1520 * This is the fast path and we cannot handle a pagefault
1521 * whilst holding the struct mutex lest the user pass in the
1522  * relocations contained within a mmaped bo. In such a case,
1523  * the page fault handler would call i915_gem_fault() and
1524 * we would try to acquire the struct mutex again. Obviously
1525 * this is bad and so lockdep complains vehemently.
1526 */
1527 pagefault_disable();
1528 copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1529 pagefault_enable();
1530 if (unlikely(copied)) {
1531 remain = -EFAULT;
1532 goto out;
1533 }
1534
1535 remain -= count;
1536 do {
1537 u64 offset = eb_relocate_entry(eb, vma, r);
1538
1539 if (likely(offset == 0)) {
1540 } else if ((s64)offset < 0) {
1541 remain = (int)offset;
1542 goto out;
1543 } else {
1544 /*
1545 * Note that reporting an error now
1546 * leaves everything in an inconsistent
1547 * state as we have *already* changed
1548 * the relocation value inside the
1549 * object. As we have not changed the
1550  * reloc.presumed_offset and will not
1551  * change the execobject.offset, on the
1552  * next call we may not rewrite the value
1553 * inside the object, leaving it
1554 * dangling and causing a GPU hang. Unless
1555 * userspace dynamically rebuilds the
1556 * relocations on each execbuf rather than
1557 * presume a static tree.
1558 *
1559 * We did previously check if the relocations
1560 * were writable (access_ok), an error now
1561 * would be a strange race with mprotect,
1562 * having already demonstrated that we
1563 * can read from this userspace address.
1564 */
1565 offset = gen8_canonical_addr(offset & ~UPDATE);
1566 if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
1567 remain = -EFAULT;
1568 goto out;
1569 }
1570 }
1571 } while (r++, --count);
1572 urelocs += ARRAY_SIZE(stack);
1573 } while (remain);
1574 out:
1575 reloc_cache_reset(&eb->reloc_cache);
1576 return remain;
1577 }
1578
1579 static int
1580 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
1581 {
1582 const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1583 struct drm_i915_gem_relocation_entry *relocs =
1584 u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1585 unsigned int i;
1586 int err;
1587
1588 for (i = 0; i < entry->relocation_count; i++) {
1589 u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
1590
1591 if ((s64)offset < 0) {
1592 err = (int)offset;
1593 goto err;
1594 }
1595 }
1596 err = 0;
1597 err:
1598 reloc_cache_reset(&eb->reloc_cache);
1599 return err;
1600 }
1601
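/*
 * Validate that the user relocation array is readable, touching one byte
 * per page (and the final byte) to prefault it ahead of the
 * pagefault-disabled fast path.
 */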
1602 static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1603 {
1604 const char __user *addr, *end;
1605 unsigned long size;
1606 char __maybe_unused c;
1607
1608 size = entry->relocation_count;
1609 if (size == 0)
1610 return 0;
1611
1612 if (size > N_RELOC(ULONG_MAX))
1613 return -EINVAL;
1614
1615 addr = u64_to_user_ptr(entry->relocs_ptr);
1616 size *= sizeof(struct drm_i915_gem_relocation_entry);
1617 if (!access_ok(addr, size))
1618 return -EFAULT;
1619
1620 end = addr + size;
1621 for (; addr < end; addr += PAGE_SIZE) {
1622 int err = __get_user(c, addr);
1623 if (err)
1624 return err;
1625 }
1626 return __get_user(c, end - 1);
1627 }
1628
1629 static int eb_copy_relocations(const struct i915_execbuffer *eb)
1630 {
1631 const unsigned int count = eb->buffer_count;
1632 unsigned int i;
1633 int err;
1634
1635 for (i = 0; i < count; i++) {
1636 const unsigned int nreloc = eb->exec[i].relocation_count;
1637 struct drm_i915_gem_relocation_entry __user *urelocs;
1638 struct drm_i915_gem_relocation_entry *relocs;
1639 unsigned long size;
1640 unsigned long copied;
1641
1642 if (nreloc == 0)
1643 continue;
1644
1645 err = check_relocations(&eb->exec[i]);
1646 if (err)
1647 goto err;
1648
1649 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1650 size = nreloc * sizeof(*relocs);
1651
1652 relocs = kvmalloc_array(size, 1, GFP_KERNEL);
1653 if (!relocs) {
1654 err = -ENOMEM;
1655 goto err;
1656 }
1657
1658 /* copy_from_user is limited to < 4GiB */
1659 copied = 0;
1660 do {
1661 unsigned int len =
1662 min_t(u64, BIT_ULL(31), size - copied);
1663
1664 if (__copy_from_user((char *)relocs + copied,
1665 (char __user *)urelocs + copied,
1666 len)) {
1667 end_user:
1668 user_access_end();
1669 end:
1670 kvfree(relocs);
1671 err = -EFAULT;
1672 goto err;
1673 }
1674
1675 copied += len;
1676 } while (copied < size);
1677
1678 /*
1679 * As we do not update the known relocation offsets after
1680 * relocating (due to the complexities in lock handling),
1681 * we need to mark them as invalid now so that we force the
1682 * relocation processing next time. Just in case the target
1683 * object is evicted and then rebound into its old
1684 * presumed_offset before the next execbuffer - if that
1685 * happened we would make the mistake of assuming that the
1686 * relocations were valid.
1687 */
1688 if (!user_access_begin(urelocs, size))
1689 goto end;
1690
1691 for (copied = 0; copied < nreloc; copied++)
1692 unsafe_put_user(-1,
1693 &urelocs[copied].presumed_offset,
1694 end_user);
1695 user_access_end();
1696
1697 eb->exec[i].relocs_ptr = (uintptr_t)relocs;
1698 }
1699
1700 return 0;
1701
1702 err:
1703 while (i--) {
1704 struct drm_i915_gem_relocation_entry *relocs =
1705 u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1706 if (eb->exec[i].relocation_count)
1707 kvfree(relocs);
1708 }
1709 return err;
1710 }
1711
1712 static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1713 {
1714 const unsigned int count = eb->buffer_count;
1715 unsigned int i;
1716
1717 if (unlikely(i915_modparams.prefault_disable))
1718 return 0;
1719
1720 for (i = 0; i < count; i++) {
1721 int err;
1722
1723 err = check_relocations(&eb->exec[i]);
1724 if (err)
1725 return err;
1726 }
1727
1728 return 0;
1729 }
1730
1731 static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1732 {
1733 struct drm_device *dev = &eb->i915->drm;
1734 bool have_copy = false;
1735 struct i915_vma *vma;
1736 int err = 0;
1737
1738 repeat:
1739 if (signal_pending(current)) {
1740 err = -ERESTARTSYS;
1741 goto out;
1742 }
1743
1744 /* We may process another execbuffer during the unlock... */
1745 eb_reset_vmas(eb);
1746 mutex_unlock(&dev->struct_mutex);
1747
1748 /*
1749  * We take 3 passes through the slowpath.
1750 *
1751 * 1 - we try to just prefault all the user relocation entries and
1752 * then attempt to reuse the atomic pagefault disabled fast path again.
1753 *
1754 * 2 - we copy the user entries to a local buffer here outside of the
1755  * lock and allow ourselves to wait upon any rendering before
1756 * relocations
1757 *
1758 * 3 - we already have a local copy of the relocation entries, but
1759 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
1760 */
1761 if (!err) {
1762 err = eb_prefault_relocations(eb);
1763 } else if (!have_copy) {
1764 err = eb_copy_relocations(eb);
1765 have_copy = err == 0;
1766 } else {
1767 cond_resched();
1768 err = 0;
1769 }
1770 if (err) {
1771 mutex_lock(&dev->struct_mutex);
1772 goto out;
1773 }
1774
1775  /* A frequent cause of EAGAIN is currently unavailable client pages */
1776 flush_workqueue(eb->i915->mm.userptr_wq);
1777
1778 err = i915_mutex_lock_interruptible(dev);
1779 if (err) {
1780 mutex_lock(&dev->struct_mutex);
1781 goto out;
1782 }
1783
1784 /* reacquire the objects */
1785 err = eb_lookup_vmas(eb);
1786 if (err)
1787 goto err;
1788
1789 GEM_BUG_ON(!eb->batch);
1790
1791 list_for_each_entry(vma, &eb->relocs, reloc_link) {
1792 if (!have_copy) {
1793 pagefault_disable();
1794 err = eb_relocate_vma(eb, vma);
1795 pagefault_enable();
1796 if (err)
1797 goto repeat;
1798 } else {
1799 err = eb_relocate_vma_slow(eb, vma);
1800 if (err)
1801 goto err;
1802 }
1803 }
1804
1805 /*
1806  * Leave the user relocations as they are; this is the painfully slow path,
1807 * and we want to avoid the complication of dropping the lock whilst
1808 * having buffers reserved in the aperture and so causing spurious
1809 * ENOSPC for random operations.
1810 */
1811
1812 err:
1813 if (err == -EAGAIN)
1814 goto repeat;
1815
1816 out:
1817 if (have_copy) {
1818 const unsigned int count = eb->buffer_count;
1819 unsigned int i;
1820
1821 for (i = 0; i < count; i++) {
1822 const struct drm_i915_gem_exec_object2 *entry =
1823 &eb->exec[i];
1824 struct drm_i915_gem_relocation_entry *relocs;
1825
1826 if (!entry->relocation_count)
1827 continue;
1828
1829 relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1830 kvfree(relocs);
1831 }
1832 }
1833
1834 return err;
1835 }
1836
1837 static int eb_relocate(struct i915_execbuffer *eb)
1838 {
1839 if (eb_lookup_vmas(eb))
1840 goto slow;
1841
1842 /* The objects are in their final locations, apply the relocations. */
1843 if (eb->args->flags & __EXEC_HAS_RELOC) {
1844 struct i915_vma *vma;
1845
1846 list_for_each_entry(vma, &eb->relocs, reloc_link) {
1847 if (eb_relocate_vma(eb, vma))
1848 goto slow;
1849 }
1850 }
1851
1852 return 0;
1853
1854 slow:
1855 return eb_relocate_slow(eb);
1856 }
1857
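/*
 * eb_move_to_gpu() below makes two passes over the exec list: the first
 * flushes stale CPU caches and, unless EXEC_OBJECT_ASYNC is set, queues an
 * implicit fence wait on each object; the second marks each vma as active
 * on the request and drops our reservation, so the objects are tracked
 * solely through the request from then on.
 */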
1858 static int eb_move_to_gpu(struct i915_execbuffer *eb)
1859 {
1860 const unsigned int count = eb->buffer_count;
1861 unsigned int i;
1862 int err;
1863
1864 for (i = 0; i < count; i++) {
1865 unsigned int flags = eb->flags[i];
1866 struct i915_vma *vma = eb->vma[i];
1867 struct drm_i915_gem_object *obj = vma->obj;
1868
1869 if (flags & EXEC_OBJECT_CAPTURE) {
1870 struct i915_capture_list *capture;
1871
1872 capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1873 if (unlikely(!capture))
1874 return -ENOMEM;
1875
1876 capture->next = eb->request->capture_list;
1877 capture->vma = eb->vma[i];
1878 eb->request->capture_list = capture;
1879 }
1880
1881 /*
1882 * If the GPU is not _reading_ through the CPU cache, we need
1883 * to make sure that any writes (both previous GPU writes from
1884 * before a change in snooping levels and normal CPU writes)
1885 * caught in that cache are flushed to main memory.
1886 *
1887 * We want to say
1888 * obj->cache_dirty &&
1889 * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
1890 * but gcc's optimiser doesn't handle that as well and emits
1891 * two jumps instead of one. Maybe one day...
1892 */
1893 if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
1894 if (i915_gem_clflush_object(obj, 0))
1895 flags &= ~EXEC_OBJECT_ASYNC;
1896 }
1897
1898 if (flags & EXEC_OBJECT_ASYNC)
1899 continue;
1900
1901 err = i915_request_await_object
1902 (eb->request, obj, flags & EXEC_OBJECT_WRITE);
1903 if (err)
1904 return err;
1905 }
1906
1907 for (i = 0; i < count; i++) {
1908 unsigned int flags = eb->flags[i];
1909 struct i915_vma *vma = eb->vma[i];
1910
1911 err = i915_vma_move_to_active(vma, eb->request, flags);
1912 if (unlikely(err)) {
1913 i915_request_skip(eb->request, err);
1914 return err;
1915 }
1916
1917 __eb_unreserve_vma(vma, flags);
1918 vma->exec_flags = NULL;
1919
1920 if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
1921 i915_vma_put(vma);
1922 }
1923 eb->exec = NULL;
1924
1925 /* Unconditionally flush any chipset caches (for streaming writes). */
1926 i915_gem_chipset_flush(eb->i915);
1927
1928 return 0;
1929 }
1930
1931 static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1932 {
1933 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1934 return false;
1935
1936 /* Kernel clipping was a DRI1 misfeature */
1937 if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
1938 if (exec->num_cliprects || exec->cliprects_ptr)
1939 return false;
1940 }
1941
1942 if (exec->DR4 == 0xffffffff) {
1943 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1944 exec->DR4 = 0;
1945 }
1946 if (exec->DR1 || exec->DR4)
1947 return false;
1948
1949 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1950 return false;
1951
1952 return true;
1953 }
1954
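/*
 * For reference, a rough userspace sketch of a submission that satisfies
 * i915_gem_check_execbuffer() above (libdrm's drmIoctl(); object setup and
 * error handling omitted; batch_start_offset/batch_len must be 8-byte
 * aligned):
 *
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)objects,
 *		.buffer_count = nobjects,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */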
1955 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1956 {
1957 u32 *cs;
1958 int i;
1959
1960 if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
1961 DRM_DEBUG("sol reset is gen7/rcs only\n");
1962 return -EINVAL;
1963 }
1964
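/*
 * One MI_LOAD_REGISTER_IMM(4) header, four register/value pairs zeroing
 * GEN7_SO_WRITE_OFFSET(0..3), and a trailing MI_NOOP: 4 * 2 + 2 dwords,
 * matching the ring space reserved below.
 */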
1965 cs = intel_ring_begin(rq, 4 * 2 + 2);
1966 if (IS_ERR(cs))
1967 return PTR_ERR(cs);
1968
1969 *cs++ = MI_LOAD_REGISTER_IMM(4);
1970 for (i = 0; i < 4; i++) {
1971 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1972 *cs++ = 0;
1973 }
1974 *cs++ = MI_NOOP;
1975 intel_ring_advance(rq, cs);
1976
1977 return 0;
1978 }
1979
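/*
 * eb_parse() copies the user batch through the command parser into a
 * shadow buffer taken from the engine's batch pool. On success the shadow
 * vma is appended to the eb arrays (with __EXEC_OBJECT_HAS_PIN |
 * __EXEC_OBJECT_HAS_REF) so that it is unreserved and released along with
 * the user objects.
 */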
1980 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
1981 {
1982 struct drm_i915_gem_object *shadow_batch_obj;
1983 struct i915_vma *vma;
1984 int err;
1985
1986 shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
1987 PAGE_ALIGN(eb->batch_len));
1988 if (IS_ERR(shadow_batch_obj))
1989 return ERR_CAST(shadow_batch_obj);
1990
1991 err = intel_engine_cmd_parser(eb->engine,
1992 eb->batch->obj,
1993 shadow_batch_obj,
1994 eb->batch_start_offset,
1995 eb->batch_len,
1996 is_master);
1997 if (err) {
1998 if (err == -EACCES) /* unhandled chained batch */
1999 vma = NULL;
2000 else
2001 vma = ERR_PTR(err);
2002 goto out;
2003 }
2004
2005 vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
2006 if (IS_ERR(vma))
2007 goto out;
2008
2009 eb->vma[eb->buffer_count] = i915_vma_get(vma);
2010 eb->flags[eb->buffer_count] =
2011 __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
2012 vma->exec_flags = &eb->flags[eb->buffer_count];
2013 eb->buffer_count++;
2014
2015 out:
2016 i915_gem_object_unpin_pages(shadow_batch_obj);
2017 return vma;
2018 }
2019
2020 static void
2021 add_to_client(struct i915_request *rq, struct drm_file *file)
2022 {
2023 rq->file_priv = file->driver_priv;
2024 list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
2025 }
2026
2027 static int eb_submit(struct i915_execbuffer *eb)
2028 {
2029 int err;
2030
2031 err = eb_move_to_gpu(eb);
2032 if (err)
2033 return err;
2034
2035 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
2036 err = i915_reset_gen7_sol_offsets(eb->request);
2037 if (err)
2038 return err;
2039 }
2040
2041 /*
2042 * After we have completed waiting for other engines (using HW semaphores)
2043 * we can signal that this request/batch is ready to run. This
2044 * allows us to determine if the batch is still waiting on the GPU
2045 * or actually running by checking the breadcrumb.
2046 */
2047 if (eb->engine->emit_init_breadcrumb) {
2048 err = eb->engine->emit_init_breadcrumb(eb->request);
2049 if (err)
2050 return err;
2051 }
2052
2053 err = eb->engine->emit_bb_start(eb->request,
2054 eb->batch->node.start +
2055 eb->batch_start_offset,
2056 eb->batch_len,
2057 eb->batch_flags);
2058 if (err)
2059 return err;
2060
2061 return 0;
2062 }
2063
2064 /*
2065 * Find a BSD engine on which to dispatch the corresponding BSD command.
2066 * The selected engine index is returned and cached in the file_priv.
2067 */
2068 static unsigned int
2069 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
2070 struct drm_file *file)
2071 {
2072 struct drm_i915_file_private *file_priv = file->driver_priv;
2073
2074 /* Check whether the file_priv has already selected one ring. */
2075 if ((int)file_priv->bsd_engine < 0)
2076 file_priv->bsd_engine = atomic_fetch_xor(1,
2077 &dev_priv->mm.bsd_engine_dispatch_index);
2078
2079 return file_priv->bsd_engine;
2080 }
2081
2082 #define I915_USER_RINGS (4)
2083
2084 static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
2085 [I915_EXEC_DEFAULT] = RCS0,
2086 [I915_EXEC_RENDER] = RCS0,
2087 [I915_EXEC_BLT] = BCS0,
2088 [I915_EXEC_BSD] = VCS0,
2089 [I915_EXEC_VEBOX] = VECS0
2090 };
2091
2092 static struct intel_engine_cs *
2093 eb_select_engine(struct drm_i915_private *dev_priv,
2094 struct drm_file *file,
2095 struct drm_i915_gem_execbuffer2 *args)
2096 {
2097 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2098 struct intel_engine_cs *engine;
2099
2100 if (user_ring_id > I915_USER_RINGS) {
2101 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
2102 return NULL;
2103 }
2104
2105 if ((user_ring_id != I915_EXEC_BSD) &&
2106 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
2107 DRM_DEBUG("execbuf with non bsd ring but with invalid "
2108 "bsd dispatch flags: %d\n", (int)(args->flags));
2109 return NULL;
2110 }
2111
2112 if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
2113 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2114
2115 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2116 bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
2117 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2118 bsd_idx <= I915_EXEC_BSD_RING2) {
2119 bsd_idx >>= I915_EXEC_BSD_SHIFT;
2120 bsd_idx--;
2121 } else {
2122 DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
2123 bsd_idx);
2124 return NULL;
2125 }
2126
2127 engine = dev_priv->engine[_VCS(bsd_idx)];
2128 } else {
2129 engine = dev_priv->engine[user_ring_map[user_ring_id]];
2130 }
2131
2132 if (!engine) {
2133 DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
2134 return NULL;
2135 }
2136
2137 return engine;
2138 }
2139
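/*
 * Userspace can bypass the round-robin BSD selection above and pin a
 * specific video engine, e.g. (sketch, flags from uapi/drm/i915_drm.h):
 *
 *	execbuf.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;
 *
 * I915_EXEC_BSD_RING1 maps to VCS0 and I915_EXEC_BSD_RING2 to VCS1 in
 * eb_select_engine() above.
 */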
2140 static void
2141 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2142 {
2143 while (n--)
2144 drm_syncobj_put(ptr_mask_bits(fences[n], 2));
2145 kvfree(fences);
2146 }
2147
2148 static struct drm_syncobj **
2149 get_fence_array(struct drm_i915_gem_execbuffer2 *args,
2150 struct drm_file *file)
2151 {
2152 const unsigned long nfences = args->num_cliprects;
2153 struct drm_i915_gem_exec_fence __user *user;
2154 struct drm_syncobj **fences;
2155 unsigned long n;
2156 int err;
2157
2158 if (!(args->flags & I915_EXEC_FENCE_ARRAY))
2159 return NULL;
2160
2161 /* Check multiplication overflow for access_ok() and kvmalloc_array() */
2162 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2163 if (nfences > min_t(unsigned long,
2164 ULONG_MAX / sizeof(*user),
2165 SIZE_MAX / sizeof(*fences)))
2166 return ERR_PTR(-EINVAL);
2167
2168 user = u64_to_user_ptr(args->cliprects_ptr);
2169 if (!access_ok(user, nfences * sizeof(*user)))
2170 return ERR_PTR(-EFAULT);
2171
2172 fences = kvmalloc_array(nfences, sizeof(*fences),
2173 __GFP_NOWARN | GFP_KERNEL);
2174 if (!fences)
2175 return ERR_PTR(-ENOMEM);
2176
2177 for (n = 0; n < nfences; n++) {
2178 struct drm_i915_gem_exec_fence fence;
2179 struct drm_syncobj *syncobj;
2180
2181 if (__copy_from_user(&fence, user++, sizeof(fence))) {
2182 err = -EFAULT;
2183 goto err;
2184 }
2185
2186 if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
2187 err = -EINVAL;
2188 goto err;
2189 }
2190
2191 syncobj = drm_syncobj_find(file, fence.handle);
2192 if (!syncobj) {
2193 DRM_DEBUG("Invalid syncobj handle provided\n");
2194 err = -ENOENT;
2195 goto err;
2196 }
2197
2198 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2199 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2200
2201 fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
2202 }
2203
2204 return fences;
2205
2206 err:
2207 __free_fence_array(fences, n);
2208 return ERR_PTR(err);
2209 }
2210
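/*
 * A rough sketch of how userspace supplies the array parsed above (the
 * fence array reuses the legacy cliprects fields):
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *
 *	execbuf.flags |= I915_EXEC_FENCE_ARRAY;
 *	execbuf.cliprects_ptr = (uintptr_t)fences;
 *	execbuf.num_cliprects = 2;
 */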
2211 static void
2212 put_fence_array(struct drm_i915_gem_execbuffer2 *args,
2213 struct drm_syncobj **fences)
2214 {
2215 if (fences)
2216 __free_fence_array(fences, args->num_cliprects);
2217 }
2218
2219 static int
2220 await_fence_array(struct i915_execbuffer *eb,
2221 struct drm_syncobj **fences)
2222 {
2223 const unsigned int nfences = eb->args->num_cliprects;
2224 unsigned int n;
2225 int err;
2226
2227 for (n = 0; n < nfences; n++) {
2228 struct drm_syncobj *syncobj;
2229 struct dma_fence *fence;
2230 unsigned int flags;
2231
2232 syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2233 if (!(flags & I915_EXEC_FENCE_WAIT))
2234 continue;
2235
2236 fence = drm_syncobj_fence_get(syncobj);
2237 if (!fence)
2238 return -EINVAL;
2239
2240 err = i915_request_await_dma_fence(eb->request, fence);
2241 dma_fence_put(fence);
2242 if (err < 0)
2243 return err;
2244 }
2245
2246 return 0;
2247 }
2248
2249 static void
2250 signal_fence_array(struct i915_execbuffer *eb,
2251 struct drm_syncobj **fences)
2252 {
2253 const unsigned int nfences = eb->args->num_cliprects;
2254 struct dma_fence * const fence = &eb->request->fence;
2255 unsigned int n;
2256
2257 for (n = 0; n < nfences; n++) {
2258 struct drm_syncobj *syncobj;
2259 unsigned int flags;
2260
2261 syncobj = ptr_unpack_bits(fences[n], &flags, 2);
2262 if (!(flags & I915_EXEC_FENCE_SIGNAL))
2263 continue;
2264
2265 drm_syncobj_replace_fence(syncobj, fence);
2266 }
2267 }
2268
2269 static int
2270 i915_gem_do_execbuffer(struct drm_device *dev,
2271 struct drm_file *file,
2272 struct drm_i915_gem_execbuffer2 *args,
2273 struct drm_i915_gem_exec_object2 *exec,
2274 struct drm_syncobj **fences)
2275 {
2276 struct i915_execbuffer eb;
2277 struct dma_fence *in_fence = NULL;
2278 struct sync_file *out_fence = NULL;
2279 intel_wakeref_t wakeref;
2280 int out_fence_fd = -1;
2281 int err;
2282
2283 BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
2284 BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
2285 ~__EXEC_OBJECT_UNKNOWN_FLAGS);
2286
2287 eb.i915 = to_i915(dev);
2288 eb.file = file;
2289 eb.args = args;
2290 if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
2291 args->flags |= __EXEC_HAS_RELOC;
2292
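/*
 * exec2_list was allocated by the ioctl wrappers with eb_element_size()
 * per entry (plus one spare slot for the command parser), so the vma
 * pointer and flags arrays are carved out of the tail of that same
 * allocation here.
 */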
2293 eb.exec = exec;
2294 eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
2295 eb.vma[0] = NULL;
2296 eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
2297
2298 eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
2299 reloc_cache_init(&eb.reloc_cache, eb.i915);
2300
2301 eb.buffer_count = args->buffer_count;
2302 eb.batch_start_offset = args->batch_start_offset;
2303 eb.batch_len = args->batch_len;
2304
2305 eb.batch_flags = 0;
2306 if (args->flags & I915_EXEC_SECURE) {
2307 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
2308 return -EPERM;
2309
2310 eb.batch_flags |= I915_DISPATCH_SECURE;
2311 }
2312 if (args->flags & I915_EXEC_IS_PINNED)
2313 eb.batch_flags |= I915_DISPATCH_PINNED;
2314
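/*
 * rsvd2 carries the fence file descriptors: the lower 32 bits hold the
 * in-fence fd consumed here when I915_EXEC_FENCE_IN is set, and on success
 * the out-fence fd for I915_EXEC_FENCE_OUT is written back into the upper
 * 32 bits (see the out_fence handling below).
 */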
2315 if (args->flags & I915_EXEC_FENCE_IN) {
2316 in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
2317 if (!in_fence)
2318 return -EINVAL;
2319 }
2320
2321 if (args->flags & I915_EXEC_FENCE_OUT) {
2322 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
2323 if (out_fence_fd < 0) {
2324 err = out_fence_fd;
2325 goto err_in_fence;
2326 }
2327 }
2328
2329 err = eb_create(&eb);
2330 if (err)
2331 goto err_out_fence;
2332
2333 GEM_BUG_ON(!eb.lut_size);
2334
2335 err = eb_select_context(&eb);
2336 if (unlikely(err))
2337 goto err_destroy;
2338
2339 eb.engine = eb_select_engine(eb.i915, file, args);
2340 if (!eb.engine) {
2341 err = -EINVAL;
2342 goto err_engine;
2343 }
2344
2345 /*
2346 * Take a local wakeref for preparing to dispatch the execbuf as
2347 * we expect to access the hardware fairly frequently in the
2348 * process. Upon first dispatch, we acquire another prolonged
2349 * wakeref that we hold until the GPU has been idle for at least
2350 * 100ms.
2351 */
2352 wakeref = intel_runtime_pm_get(eb.i915);
2353
2354 err = i915_mutex_lock_interruptible(dev);
2355 if (err)
2356 goto err_rpm;
2357
2358 err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
2359 if (unlikely(err))
2360 goto err_unlock;
2361
2362 err = eb_relocate(&eb);
2363 if (err) {
2364 /*
2365 * If the user expects the execobject.offset and
2366 * reloc.presumed_offset to be an exact match,
2367 * as when using NO_RELOC, then we cannot update
2368 * the execobject.offset until we have completed
2369 * relocation.
2370 */
2371 args->flags &= ~__EXEC_HAS_RELOC;
2372 goto err_vma;
2373 }
2374
2375 if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
2376 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
2377 err = -EINVAL;
2378 goto err_vma;
2379 }
2380 if (eb.batch_start_offset > eb.batch->size ||
2381 eb.batch_len > eb.batch->size - eb.batch_start_offset) {
2382 DRM_DEBUG("Attempting to use out-of-bounds batch\n");
2383 err = -EINVAL;
2384 goto err_vma;
2385 }
2386
2387 if (eb_use_cmdparser(&eb)) {
2388 struct i915_vma *vma;
2389
2390 vma = eb_parse(&eb, drm_is_current_master(file));
2391 if (IS_ERR(vma)) {
2392 err = PTR_ERR(vma);
2393 goto err_vma;
2394 }
2395
2396 if (vma) {
2397 /*
2398 * Batch parsed and accepted:
2399 *
2400 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
2401 * bit from MI_BATCH_BUFFER_START commands issued in
2402 * the dispatch_execbuffer implementations. We
2403 * specifically don't want that set on batches the
2404 * command parser has accepted.
2405 */
2406 eb.batch_flags |= I915_DISPATCH_SECURE;
2407 eb.batch_start_offset = 0;
2408 eb.batch = vma;
2409 }
2410 }
2411
2412 if (eb.batch_len == 0)
2413 eb.batch_len = eb.batch->size - eb.batch_start_offset;
2414
2415 /*
2416 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
2417 * batch" bit. Hence we need to pin secure batches into the global gtt.
2418 * hsw should have this fixed, but bdw mucks it up again. */
2419 if (eb.batch_flags & I915_DISPATCH_SECURE) {
2420 struct i915_vma *vma;
2421
2422 /*
2423 * So at first glance it looks freaky that we pin the batch here
2424 * outside of the reservation loop. But:
2425 * - The batch is already pinned into the relevant ppgtt, so we
2426 * already have the backing storage fully allocated.
2427 * - No other BO uses the global gtt (well contexts, but meh),
2428 * so we don't really have issues with multiple objects not
2429 * fitting due to fragmentation.
2430 * So this is actually safe.
2431 */
2432 vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
2433 if (IS_ERR(vma)) {
2434 err = PTR_ERR(vma);
2435 goto err_vma;
2436 }
2437
2438 eb.batch = vma;
2439 }
2440
2441 /* All GPU relocation batches must be submitted prior to the user rq */
2442 GEM_BUG_ON(eb.reloc_cache.rq);
2443
2444 /* Allocate a request for this batch buffer nice and early. */
2445 eb.request = i915_request_alloc(eb.engine, eb.ctx);
2446 if (IS_ERR(eb.request)) {
2447 err = PTR_ERR(eb.request);
2448 goto err_batch_unpin;
2449 }
2450
2451 if (in_fence) {
2452 err = i915_request_await_dma_fence(eb.request, in_fence);
2453 if (err < 0)
2454 goto err_request;
2455 }
2456
2457 if (fences) {
2458 err = await_fence_array(&eb, fences);
2459 if (err)
2460 goto err_request;
2461 }
2462
2463 if (out_fence_fd != -1) {
2464 out_fence = sync_file_create(&eb.request->fence);
2465 if (!out_fence) {
2466 err = -ENOMEM;
2467 goto err_request;
2468 }
2469 }
2470
2471 /*
2472 * Whilst this request exists, batch_obj will be on the
2473 * active_list, and so will hold the active reference. Only when this
2474 * request is retired will the batch_obj be moved onto the
2475 * inactive_list and lose its active reference. Hence we do not need
2476 * to explicitly hold another reference here.
2477 */
2478 eb.request->batch = eb.batch;
2479
2480 trace_i915_request_queue(eb.request, eb.batch_flags);
2481 err = eb_submit(&eb);
2482 err_request:
2483 i915_request_add(eb.request);
2484 add_to_client(eb.request, file);
2485
2486 if (fences)
2487 signal_fence_array(&eb, fences);
2488
2489 if (out_fence) {
2490 if (err == 0) {
2491 fd_install(out_fence_fd, out_fence->file);
2492 args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
2493 args->rsvd2 |= (u64)out_fence_fd << 32;
2494 out_fence_fd = -1;
2495 } else {
2496 fput(out_fence->file);
2497 }
2498 }
2499
2500 err_batch_unpin:
2501 if (eb.batch_flags & I915_DISPATCH_SECURE)
2502 i915_vma_unpin(eb.batch);
2503 err_vma:
2504 if (eb.exec)
2505 eb_release_vmas(&eb);
2506 err_unlock:
2507 mutex_unlock(&dev->struct_mutex);
2508 err_rpm:
2509 intel_runtime_pm_put(eb.i915, wakeref);
2510 err_engine:
2511 i915_gem_context_put(eb.ctx);
2512 err_destroy:
2513 eb_destroy(&eb);
2514 err_out_fence:
2515 if (out_fence_fd != -1)
2516 put_unused_fd(out_fence_fd);
2517 err_in_fence:
2518 dma_fence_put(in_fence);
2519 return err;
2520 }
2521
2522 static size_t eb_element_size(void)
2523 {
2524 return (sizeof(struct drm_i915_gem_exec_object2) +
2525 sizeof(struct i915_vma *) +
2526 sizeof(unsigned int));
2527 }
2528
2529 static bool check_buffer_count(size_t count)
2530 {
2531 const size_t sz = eb_element_size();
2532
2533 /*
2534 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
2535 * array size (see eb_create()). Otherwise, we can accept an array as
2536 * large as can be addressed (though use large arrays at your peril)!
2537 */
2538
2539 return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
2540 }
2541
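/*
 * Bounding count by SIZE_MAX / sz - 1 guarantees that the later
 * kvmalloc_array(count + 1, eb_element_size()) cannot overflow, even with
 * the extra slot reserved for the command parser.
 */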
2542 /*
2543 * Legacy execbuffer just creates an exec2 list from the original exec object
2544 * list array and passes it to the real function.
2545 */
2546 int
2547 i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
2548 struct drm_file *file)
2549 {
2550 struct drm_i915_gem_execbuffer *args = data;
2551 struct drm_i915_gem_execbuffer2 exec2;
2552 struct drm_i915_gem_exec_object *exec_list = NULL;
2553 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
2554 const size_t count = args->buffer_count;
2555 unsigned int i;
2556 int err;
2557
2558 if (!check_buffer_count(count)) {
2559 DRM_DEBUG("execbuf2 with %zd buffers\n", count);
2560 return -EINVAL;
2561 }
2562
2563 exec2.buffers_ptr = args->buffers_ptr;
2564 exec2.buffer_count = args->buffer_count;
2565 exec2.batch_start_offset = args->batch_start_offset;
2566 exec2.batch_len = args->batch_len;
2567 exec2.DR1 = args->DR1;
2568 exec2.DR4 = args->DR4;
2569 exec2.num_cliprects = args->num_cliprects;
2570 exec2.cliprects_ptr = args->cliprects_ptr;
2571 exec2.flags = I915_EXEC_RENDER;
2572 i915_execbuffer2_set_context_id(exec2, 0);
2573
2574 if (!i915_gem_check_execbuffer(&exec2))
2575 return -EINVAL;
2576
2577 /* Copy in the exec list from userland */
2578 exec_list = kvmalloc_array(count, sizeof(*exec_list),
2579 __GFP_NOWARN | GFP_KERNEL);
2580 exec2_list = kvmalloc_array(count + 1, eb_element_size(),
2581 __GFP_NOWARN | GFP_KERNEL);
2582 if (exec_list == NULL || exec2_list == NULL) {
2583 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
2584 args->buffer_count);
2585 kvfree(exec_list);
2586 kvfree(exec2_list);
2587 return -ENOMEM;
2588 }
2589 err = copy_from_user(exec_list,
2590 u64_to_user_ptr(args->buffers_ptr),
2591 sizeof(*exec_list) * count);
2592 if (err) {
2593 DRM_DEBUG("copy %d exec entries failed %d\n",
2594 args->buffer_count, err);
2595 kvfree(exec_list);
2596 kvfree(exec2_list);
2597 return -EFAULT;
2598 }
2599
2600 for (i = 0; i < args->buffer_count; i++) {
2601 exec2_list[i].handle = exec_list[i].handle;
2602 exec2_list[i].relocation_count = exec_list[i].relocation_count;
2603 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
2604 exec2_list[i].alignment = exec_list[i].alignment;
2605 exec2_list[i].offset = exec_list[i].offset;
2606 if (INTEL_GEN(to_i915(dev)) < 4)
2607 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
2608 else
2609 exec2_list[i].flags = 0;
2610 }
2611
2612 err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
2613 if (exec2.flags & __EXEC_HAS_RELOC) {
2614 struct drm_i915_gem_exec_object __user *user_exec_list =
2615 u64_to_user_ptr(args->buffers_ptr);
2616
2617 /* Copy the new buffer offsets back to the user's exec list. */
2618 for (i = 0; i < args->buffer_count; i++) {
2619 if (!(exec2_list[i].offset & UPDATE))
2620 continue;
2621
2622 exec2_list[i].offset =
2623 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2624 exec2_list[i].offset &= PIN_OFFSET_MASK;
2625 if (__copy_to_user(&user_exec_list[i].offset,
2626 &exec2_list[i].offset,
2627 sizeof(user_exec_list[i].offset)))
2628 break;
2629 }
2630 }
2631
2632 kvfree(exec_list);
2633 kvfree(exec2_list);
2634 return err;
2635 }
2636
2637 int
2638 i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
2639 struct drm_file *file)
2640 {
2641 struct drm_i915_gem_execbuffer2 *args = data;
2642 struct drm_i915_gem_exec_object2 *exec2_list;
2643 struct drm_syncobj **fences = NULL;
2644 const size_t count = args->buffer_count;
2645 int err;
2646
2647 if (!check_buffer_count(count)) {
2648 DRM_DEBUG("execbuf2 with %zd buffers\n", count);
2649 return -EINVAL;
2650 }
2651
2652 if (!i915_gem_check_execbuffer(args))
2653 return -EINVAL;
2654
2655 /* Allocate an extra slot for use by the command parser */
2656 exec2_list = kvmalloc_array(count + 1, eb_element_size(),
2657 __GFP_NOWARN | GFP_KERNEL);
2658 if (exec2_list == NULL) {
2659 DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
2660 count);
2661 return -ENOMEM;
2662 }
2663 if (copy_from_user(exec2_list,
2664 u64_to_user_ptr(args->buffers_ptr),
2665 sizeof(*exec2_list) * count)) {
2666 DRM_DEBUG("copy %zd exec entries failed\n", count);
2667 kvfree(exec2_list);
2668 return -EFAULT;
2669 }
2670
2671 if (args->flags & I915_EXEC_FENCE_ARRAY) {
2672 fences = get_fence_array(args, file);
2673 if (IS_ERR(fences)) {
2674 kvfree(exec2_list);
2675 return PTR_ERR(fences);
2676 }
2677 }
2678
2679 err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);
2680
2681 /*
2682 * Now that we have begun execution of the batchbuffer, we ignore
2683 * any new error after this point. Also given that we have already
2684 * updated the associated relocations, we try to write out the current
2685 * object locations irrespective of any error.
2686 */
2687 if (args->flags & __EXEC_HAS_RELOC) {
2688 struct drm_i915_gem_exec_object2 __user *user_exec_list =
2689 u64_to_user_ptr(args->buffers_ptr);
2690 unsigned int i;
2691
2692 /* Copy the new buffer offsets back to the user's exec list. */
2693 /*
2694 * Note: count * sizeof(*user_exec_list) does not overflow,
2695 * because we checked 'count' in check_buffer_count().
2696 *
2697 * And this range already got effectively checked earlier
2698 * when we did the "copy_from_user()" above.
2699 */
2700 if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
2701 goto end;
2702
2703 for (i = 0; i < args->buffer_count; i++) {
2704 if (!(exec2_list[i].offset & UPDATE))
2705 continue;
2706
2707 exec2_list[i].offset =
2708 gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
2709 unsafe_put_user(exec2_list[i].offset,
2710 &user_exec_list[i].offset,
2711 end_user);
2712 }
2713 end_user:
2714 user_access_end();
2715 end:;
2716 }
2717
2718 args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
2719 put_fence_array(args, fences);
2720 kvfree(exec2_list);
2721 return err;
2722 }