/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 */
#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>

#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
enum {
	FORCE_CPU_RELOC = 1,
	FORCE_GTT_RELOC,
	FORCE_GPU_RELOC,
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
#define __EXEC_OBJECT_HAS_REF		BIT(31)
#define __EXEC_OBJECT_HAS_PIN		BIT(30)
#define __EXEC_OBJECT_HAS_FENCE		BIT(29)
#define __EXEC_OBJECT_NEEDS_MAP		BIT(28)
#define __EXEC_OBJECT_NEEDS_BIAS	BIT(27)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(~0u << 27) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)

#define __EXEC_HAS_RELOC	BIT(31)
#define __EXEC_VALIDATED	BIT(30)
#define __EXEC_INTERNAL_FLAGS	(~0u << 30)
#define UPDATE			PIN_OFFSET_FIXED

#define BATCH_OFFSET_BIAS (256*1024)

#define __I915_EXEC_ILLEGAL_FLAGS \
	(__I915_EXEC_UNKNOWN_FLAGS | \
	 I915_EXEC_CONSTANTS_MASK  | \
	 I915_EXEC_RESOURCE_STREAMER)
/* Catch emission of unexpected errors for CI! */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#undef EINVAL
#define EINVAL ({ \
	DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
	22; \
})
#endif
/**
 * DOC: User command execution
 *
 * Userspace submits commands to be executed on the GPU as an instruction
 * stream within a GEM object we call a batchbuffer. These instructions may
 * refer to other GEM objects containing auxiliary state such as kernels,
 * samplers, render targets and even secondary batchbuffers. Userspace does
 * not know where in the GPU memory these objects reside and so before the
 * batchbuffer is passed to the GPU for execution, those addresses in the
 * batchbuffer and auxiliary objects are updated. This is known as relocation,
 * or patching. To try and avoid having to relocate each object on the next
 * execution, userspace is told the location of those objects in this pass,
 * but this remains just a hint as the kernel may choose a new location for
 * any object in the future.
 *
 * At the level of talking to the hardware, submitting a batchbuffer for the
 * GPU to execute is to add content to a buffer from which the HW
 * command streamer is reading.
 *
 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
 *    Execlists, this command is not placed on the same buffer as the
 *    remainder of the items.
 *
 * 2. Add a command to invalidate caches to the buffer.
 *
 * 3. Add a batchbuffer start command to the buffer; the start command is
 *    essentially a token together with the GPU address of the batchbuffer
 *    to be executed.
 *
 * 4. Add a pipeline flush to the buffer.
 *
 * 5. Add a memory write command to the buffer to record when the GPU
 *    is done executing the batchbuffer. The memory write writes the
 *    global sequence number of the request, ``i915_request::global_seqno``;
 *    the i915 driver uses the current value in the register to determine
 *    if the GPU has completed the batchbuffer.
 *
 * 6. Add a user interrupt command to the buffer. This command instructs
 *    the GPU to issue an interrupt when the command, pipeline flush and
 *    memory write are completed.
 *
 * 7. Inform the hardware of the additional commands added to the buffer
 *    (by updating the tail pointer).
 *
 * Processing an execbuf ioctl is conceptually split up into a few phases.
 *
 * 1. Validation - Ensure all the pointers, handles and flags are valid.
 * 2. Reservation - Assign GPU address space for every object
 * 3. Relocation - Update any addresses to point to the final locations
 * 4. Serialisation - Order the request with respect to its dependencies
 * 5. Construction - Construct a request to execute the batchbuffer
 * 6. Submission (at some point in the future execution)
 *
 * Reserving resources for the execbuf is the most complicated phase. We
 * neither want to have to migrate the object in the address space, nor do
 * we want to have to update any relocations pointing to this object. Ideally,
 * we want to leave the object where it is and for all the existing relocations
 * to match. If the object is given a new address, or if userspace thinks the
 * object is elsewhere, we have to parse all the relocation entries and update
 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
 * all the target addresses in all of its objects match the value in the
 * relocation entries and that they all match the presumed offsets given by the
 * list of execbuffer objects. Using this knowledge, we know that if we haven't
 * moved any buffers, all the relocation entries are valid and we can skip
 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
 * hang.) The requirements for using I915_EXEC_NO_RELOC are:
 *
 *      The addresses written in the objects must match the corresponding
 *      reloc.presumed_offset which in turn must match the corresponding
 *      execobject.offset.
 *
 *      Any render targets written to in the batch must be flagged with
 *      EXEC_OBJECT_WRITE.
 *
 *      To avoid stalling, execobject.offset should match the current
 *      address of that object within the active context.
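 *
 * For illustration, a userspace submission honouring those rules might be
 * built roughly as below (a minimal, hypothetical sketch using the uapi
 * structures; buffer creation and error handling are elided, and the
 * handle/offset variables are assumptions)::
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	obj[0].handle = target_handle;
 *	obj[0].offset = presumed_target_offset;	// == reloc.presumed_offset
 *	obj[0].flags  = EXEC_OBJECT_WRITE;	// flag written render targets
 *	obj[1].handle = batch_handle;		// batch is last by default
 *	obj[1].offset = presumed_batch_offset;
 *
 *	execbuf.buffers_ptr = (uintptr_t)obj;
 *	execbuf.buffer_count = 2;
 *	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);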
 *
 * The reservation is done in multiple phases. First we try and keep any
 * object already bound in its current location - so long as it meets the
 * constraints imposed by the new execbuffer. Any object left unbound after the
 * first pass is then fitted into any available idle space. If an object does
 * not fit, all objects are removed from the reservation and the process rerun
 * after sorting the objects into a priority order (more difficult to fit
 * objects are tried first). Failing that, the entire VM is cleared and we try
 * to fit the execbuf one last time before concluding that it simply will not
 * fit.
 *
 * A small complication to all of this is that we allow userspace not only to
 * specify an alignment and a size for the object in the address space, but
 * we also allow userspace to specify the exact offset. Such objects are
 * simpler to place (the location is known a priori); all we have to do is make
 * sure the space is available.
 *
 * Once all the objects are in place, patching up the buried pointers to point
 * to the final locations is a fairly simple job of walking over the relocation
 * entry arrays, looking up the right address and rewriting the value into
 * the object. Simple! ... The relocation entries are stored in user memory
 * and so to access them we have to copy them into a local buffer. That copy
 * has to avoid taking any pagefaults as they may lead back to a GEM object
 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
 * the relocation into multiple passes. First we try to do everything within an
 * atomic context (avoiding the pagefaults) which requires that we never wait.
 * If we detect that we may wait, or if we need to fault, then we have to
 * fallback to a slower path. The slowpath has to drop the mutex. (Can you hear
 * alarm bells yet?) Dropping the mutex means that we lose all the state we
 * have built up so far for the execbuf and we must reset any global data.
 * However, we do leave the objects pinned in their final locations - which is
 * a potential issue for concurrent execbufs. Once we have left the mutex, we
 * can allocate and copy all the relocation entries into a large array at our
 * leisure, reacquire the mutex, reclaim all the objects and other state and
 * then proceed to update any incorrect addresses with the objects.
 *
 * As we process the relocation entries, we maintain a record of whether the
 * object is being written to. Using NO_RELOC, we expect userspace to provide
 * this information instead. We also check whether we can skip the relocation
 * by comparing the expected value inside the relocation entry with the
 * target's final address. If they differ, we have to map the current object
 * and rewrite the 4 or 8 byte pointer within.
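 *
 * Conceptually, that per-entry check and rewrite reduces to something like
 * the following (a simplified sketch of the CPU path only; the real code
 * below also handles page wraparound, cache flushing and the GPU path)::
 *
 *	u64 value = gen8_canonical_addr(target->node.start + reloc->delta);
 *
 *	if (gen8_canonical_addr(target->node.start) != reloc->presumed_offset) {
 *		u32 *addr = vaddr + offset_in_page(reloc->offset);
 *
 *		addr[0] = lower_32_bits(value);		// 4 byte pointer ...
 *		if (use_64bit_reloc)
 *			addr[1] = upper_32_bits(value);	// ... or 8 bytes
 *	}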
 *
 * Serialising an execbuf is quite simple according to the rules of the GEM
 * ABI. Execution within each context is ordered by the order of submission.
 * Writes to any GEM object are in order of submission and are exclusive. Reads
 * from a GEM object are unordered with respect to other reads, but ordered by
 * writes. A write submitted after a read cannot occur before the read, and
 * similarly any read submitted after a write cannot occur before the write.
 * Writes are ordered between engines such that only one write occurs at any
 * time (completing any reads beforehand) - using semaphores where available
 * and CPU serialisation otherwise. Other GEM accesses obey the same rules, any
 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
 * reads before starting, and any read (either using set-domain or pread) must
 * flush all GPU writes before starting. (Note we only employ a barrier before,
 * we currently rely on userspace not concurrently starting a new execution
 * whilst reading or writing to an object. This may be an advantage or not
 * depending on how much you trust userspace not to shoot themselves in the
 * foot.) Serialisation may just result in the request being inserted into
 * a DAG awaiting its turn, but the simplest approach is to wait on the CPU
 * until all dependencies are resolved.
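 *
 * As an illustrative sketch (not the literal serialisation code, which lives
 * in eb_move_to_gpu() below), those rules map onto the fence machinery
 * roughly as::
 *
 *	err = i915_request_await_object(rq, obj, flags & EXEC_OBJECT_WRITE);
 *	if (err == 0)
 *		err = i915_vma_move_to_active(vma, rq, flags);
 *
 * where a write awaits all prior fences on the object and installs itself as
 * the new exclusive fence, whilst a read only awaits the last write.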
 *
 * After all of that, it is just a matter of closing the request and handing
 * it to the hardware (well, leaving it in a queue to be executed). However,
 * we also offer the ability for batchbuffers to be run with elevated
 * privileges so that they access otherwise hidden registers. (Used to adjust
 * L3 cache etc.) Before any batch is given extra privileges we first must
 * check that it contains no nefarious instructions: we check that each
 * instruction is from our whitelist and all registers are also from an
 * allowed list. We first copy the user's batchbuffer to a shadow (so that the
 * user doesn't have access to it, either by the CPU or GPU as we scan it) and
 * then parse each instruction. If everything is ok, we set a flag telling the
 * hardware to run the batchbuffer in trusted mode, otherwise the ioctl is
 * rejected.
 */
struct i915_execbuffer {
	struct drm_i915_private *i915; /** i915 backpointer */
	struct drm_file *file; /** per-file lookup tables and limits */
	struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
	struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
	struct i915_vma **vma;
	unsigned int *flags;

	struct intel_engine_cs *engine; /** engine to queue the request to */
	struct i915_gem_context *ctx; /** context for building the request */
	struct i915_address_space *vm; /** GTT and vma for the request */

	struct i915_request *request; /** our request to build */
	struct i915_vma *batch; /** identity of the batch obj/vma */

	/** actual size of execobj[] as we may extend it for the cmdparser */
	unsigned int buffer_count;

	/** list of vma not yet bound during reservation phase */
	struct list_head unbound;

	/** list of vma that have execobj.relocation_count */
	struct list_head relocs;

	/**
	 * Track the most recently used object for relocations, as we
	 * frequently have to perform multiple relocations within the same
	 * obj/page
	 */
	struct reloc_cache {
		struct drm_mm_node node; /** temporary GTT binding */
		unsigned long vaddr; /** Current kmap address */
		unsigned long page; /** Currently mapped page index */
		unsigned int gen; /** Cached value of INTEL_GEN */
		bool use_64bit_reloc : 1;
		bool has_llc : 1;
		bool has_fence : 1;
		bool needs_unfenced : 1;

		struct i915_request *rq;
		u32 *rq_cmd;
		unsigned int rq_size;
	} reloc_cache;

	u64 invalid_flags; /** Set of execobj.flags that are invalid */
	u32 context_flags; /** Set of execobj.flags to insert from the ctx */

	u32 batch_start_offset; /** Location within object of batch */
	u32 batch_len; /** Length of batch within object */
	u32 batch_flags; /** Flags composed for emit_bb_start() */

	/**
	 * Indicate either the size of the hashtable used to resolve
	 * relocation handles, or if negative that we are using a direct
	 * index into the execobj[].
	 */
	int lut_size;
	struct hlist_head *buckets; /** ht for relocation handles */
};
#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])

/*
 * Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline u64 gen8_canonical_addr(u64 address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline u64 gen8_noncanonical_addr(u64 address)
{
	return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
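
/*
 * An illustrative round-trip (example values only): 0x8000_0000_0000 has
 * bit 47 set, so gen8_canonical_addr() sign-extends it to the canonical
 * 0xffff_8000_0000_0000, while gen8_noncanonical_addr() masks bits [63:48]
 * off again, recovering 0x8000_0000_0000.
 */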

static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
	return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
}

static int eb_create(struct i915_execbuffer *eb)
{
	if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
		unsigned int size = 1 + ilog2(eb->buffer_count);

		/*
		 * Without a 1:1 association between relocation handles and
		 * the execobject[] index, we instead create a hashtable.
		 * We size it dynamically based on available memory, starting
		 * first with a 1:1 associative hash and scaling back until
		 * the allocation succeeds.
		 *
		 * Later on we use a positive lut_size to indicate we are
		 * using this hashtable, and a negative value to indicate a
		 * direct lookup into the execobj[].
		 */
		do {
			gfp_t flags;

			/* While we can still reduce the allocation size, don't
			 * raise a warning and allow the allocation to fail.
			 * On the last pass though, we want to try as hard
			 * as possible to perform the allocation and warn
			 * if it fails.
			 */
			flags = GFP_KERNEL;
			if (size > 1)
				flags |= __GFP_NORETRY | __GFP_NOWARN;

			eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
					      flags);
			if (eb->buckets)
				break;
		} while (--size);

		if (unlikely(!size))
			return -ENOMEM;

		eb->lut_size = size;
	} else {
		eb->lut_size = -eb->buffer_count;
	}

	return 0;
}

static bool
eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
		 const struct i915_vma *vma,
		 unsigned int flags)
{
	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	if (flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return true;

	return false;
}

static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
	   const struct drm_i915_gem_exec_object2 *entry,
	   struct i915_vma *vma)
{
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;

	if (vma->node.size)
		pin_flags = vma->node.start;
	else
		pin_flags = entry->offset & PIN_OFFSET_MASK;

	pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
		pin_flags |= PIN_GLOBAL;

	if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
		return false;

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		if (unlikely(i915_vma_pin_fence(vma))) {
			i915_vma_unpin(vma);
			return false;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	return !eb_vma_misplaced(entry, vma, exec_flags);
}

static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
{
	GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));

	if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
		__i915_vma_unpin_fence(vma);

	__i915_vma_unpin(vma);
}

static void
eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
{
	if (!(*flags & __EXEC_OBJECT_HAS_PIN))
		return;

	__eb_unreserve_vma(vma, *flags);
	*flags &= ~__EXEC_OBJECT_RESERVED;
}

static int
eb_validate_vma(struct i915_execbuffer *eb,
		struct drm_i915_gem_exec_object2 *entry,
		struct i915_vma *vma)
{
	if (unlikely(entry->flags & eb->invalid_flags))
		return -EINVAL;

	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
		return -EINVAL;

	/*
	 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
	 * any non-page-aligned or non-canonical addresses.
	 */
	if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
		     entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
		return -EINVAL;

	/* pad_to_size was once a reserved field, so sanitize it */
	if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
		if (unlikely(offset_in_page(entry->pad_to_size)))
			return -EINVAL;
	} else {
		entry->pad_to_size = 0;
	}

	if (unlikely(vma->exec_flags)) {
		DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
			  entry->handle, (int)(entry - eb->exec));
		return -EINVAL;
	}

	/*
	 * From drm_mm perspective address space is continuous,
	 * so from this point we're always using non-canonical
	 * form internally.
	 */
	entry->offset = gen8_noncanonical_addr(entry->offset);

	if (!eb->reloc_cache.has_fence) {
		entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
	} else {
		if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
		     eb->reloc_cache.needs_unfenced) &&
		    i915_gem_object_is_tiled(vma->obj))
			entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
	}

	if (!(entry->flags & EXEC_OBJECT_PINNED))
		entry->flags |= eb->context_flags;

	return 0;
}

static int
eb_add_vma(struct i915_execbuffer *eb,
	   unsigned int i, unsigned batch_idx,
	   struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
	int err;

	GEM_BUG_ON(i915_vma_is_closed(vma));

	if (!(eb->args->flags & __EXEC_VALIDATED)) {
		err = eb_validate_vma(eb, entry, vma);
		if (unlikely(err))
			return err;
	}

	if (eb->lut_size > 0) {
		vma->exec_handle = entry->handle;
		hlist_add_head(&vma->exec_node,
			       &eb->buckets[hash_32(entry->handle,
						    eb->lut_size)]);
	}

	if (entry->relocation_count)
		list_add_tail(&vma->reloc_link, &eb->relocs);

	/*
	 * Stash a pointer from the vma to execobj, so we can query its flags,
	 * size, alignment etc as provided by the user. Also we stash a pointer
	 * to the vma inside the execobj so that we can use a direct lookup
	 * to find the right target VMA when doing relocations.
	 */
	eb->vma[i] = vma;
	eb->flags[i] = entry->flags;
	vma->exec_flags = &eb->flags[i];

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if (i == batch_idx) {
		if (entry->relocation_count &&
		    !(eb->flags[i] & EXEC_OBJECT_PINNED))
			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
		if (eb->reloc_cache.has_fence)
			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;

		eb->batch = vma;
	}

	err = 0;
	if (eb_pin_vma(eb, entry, vma)) {
		if (entry->offset != vma->node.start) {
			entry->offset = vma->node.start | UPDATE;
			eb->args->flags |= __EXEC_HAS_RELOC;
		}
	} else {
		eb_unreserve_vma(vma, vma->exec_flags);

		list_add_tail(&vma->exec_link, &eb->unbound);
		if (drm_mm_node_allocated(&vma->node))
			err = i915_vma_unbind(vma);
		if (unlikely(err))
			vma->exec_flags = NULL;
	}
	return err;
}

static inline int use_cpu_reloc(const struct reloc_cache *cache,
				const struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
		return true;

	if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
		return false;

	return (cache->has_llc ||
		obj->cache_dirty ||
		obj->cache_level != I915_CACHE_NONE);
}

static int eb_reserve_vma(const struct i915_execbuffer *eb,
			  struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int exec_flags = *vma->exec_flags;
	u64 pin_flags;
	int err;

	pin_flags = PIN_USER | PIN_NONBLOCK;
	if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
		pin_flags |= PIN_GLOBAL;

	/*
	 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
	 * limit address to the first 4GBs for unflagged objects.
	 */
	if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
		pin_flags |= PIN_ZONE_4G;

	if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
		pin_flags |= PIN_MAPPABLE;

	if (exec_flags & EXEC_OBJECT_PINNED) {
		pin_flags |= entry->offset | PIN_OFFSET_FIXED;
		pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
	} else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
		pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
	}

	err = i915_vma_pin(vma,
			   entry->pad_to_size, entry->alignment,
			   pin_flags);
	if (err)
		return err;

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start | UPDATE;
		eb->args->flags |= __EXEC_HAS_RELOC;
	}

	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
		err = i915_vma_pin_fence(vma);
		if (unlikely(err)) {
			i915_vma_unpin(vma);
			return err;
		}

		if (vma->fence)
			exec_flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	*vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
	GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));

	return 0;
}

static int eb_reserve(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	struct list_head last;
	struct i915_vma *vma;
	unsigned int i, pass;
	int err;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */

	pass = 0;
	err = 0;
	do {
		list_for_each_entry(vma, &eb->unbound, exec_link) {
			err = eb_reserve_vma(eb, vma);
			if (err)
				break;
		}
		if (err != -ENOSPC)
			return err;

		/* Resort *all* the objects into priority order */
		INIT_LIST_HEAD(&eb->unbound);
		INIT_LIST_HEAD(&last);
		for (i = 0; i < count; i++) {
			unsigned int flags = eb->flags[i];
			struct i915_vma *vma = eb->vma[i];

			if (flags & EXEC_OBJECT_PINNED &&
			    flags & __EXEC_OBJECT_HAS_PIN)
				continue;

			eb_unreserve_vma(vma, &eb->flags[i]);

			if (flags & EXEC_OBJECT_PINNED)
				/* Pinned must have their slot */
				list_add(&vma->exec_link, &eb->unbound);
			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
				/* Map requires the lowest 256MiB (aperture) */
				list_add_tail(&vma->exec_link, &eb->unbound);
			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
				/* Prioritise 4GiB region for restricted bo */
				list_add(&vma->exec_link, &last);
			else
				list_add_tail(&vma->exec_link, &last);
		}
		list_splice_tail(&last, &eb->unbound);

		switch (pass++) {
		case 0:
			break;

		case 1:
			/* Too fragmented, unbind everything and retry */
			err = i915_gem_evict_vm(eb->vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}

static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
{
	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
		return 0;

	return eb->buffer_count - 1;
}

static int eb_select_context(struct i915_execbuffer *eb)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
	if (unlikely(!ctx))
		return -ENOENT;

	eb->ctx = ctx;
	if (ctx->ppgtt) {
		eb->vm = &ctx->ppgtt->vm;
		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
	} else {
		eb->vm = &eb->i915->ggtt.vm;
	}

	eb->context_flags = 0;
	if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
		eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return 0;
}

static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
{
	struct i915_request *rq;

	/*
	 * Completely unscientific finger-in-the-air estimates for suitable
	 * maximum user request size (to avoid blocking) and then backoff.
	 */
	if (intel_ring_update_space(ring) >= PAGE_SIZE)
		return NULL;

	/*
	 * Find a request such that, after waiting upon it, there will be at
	 * least half the ring available. The hysteresis allows us to compete
	 * for the shared ring and should mean that we sleep less often prior
	 * to claiming our resources, but not so long that the ring completely
	 * drains before we can submit our next request.
	 */
	list_for_each_entry(rq, &ring->request_list, ring_link) {
		if (__intel_ring_space(rq->postfix,
				       ring->emit, ring->size) > ring->size / 2)
			break;
	}
	if (&rq->ring_link == &ring->request_list)
		return NULL; /* weird, we will check again later for real */

	return i915_request_get(rq);
}

static int eb_wait_for_ring(const struct i915_execbuffer *eb)
{
	const struct intel_context *ce;
	struct i915_request *rq;
	int ret = 0;

	/*
	 * Apply a light amount of backpressure to prevent excessive hogs
	 * from blocking waiting for space whilst holding struct_mutex and
	 * keeping all of their resources pinned.
	 */

	ce = intel_context_lookup(eb->ctx, eb->engine);
	if (!ce || !ce->ring) /* first use, assume empty! */
		return 0;

	rq = __eb_wait_for_ring(ce->ring);
	if (rq) {
		mutex_unlock(&eb->i915->drm.struct_mutex);

		if (i915_request_wait(rq,
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			ret = -EINTR;

		i915_request_put(rq);

		mutex_lock(&eb->i915->drm.struct_mutex);
	}

	return ret;
}

static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
	struct drm_i915_gem_object *obj;
	unsigned int i, batch;
	int err;

	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
		return -ENOENT;

	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
		return -EIO;

	INIT_LIST_HEAD(&eb->relocs);
	INIT_LIST_HEAD(&eb->unbound);

	batch = eb_batch_index(eb);

	for (i = 0; i < eb->buffer_count; i++) {
		u32 handle = eb->exec[i].handle;
		struct i915_lut_handle *lut;
		struct i915_vma *vma;

		vma = radix_tree_lookup(handles_vma, handle);
		if (likely(vma))
			goto add_vma;

		obj = i915_gem_object_lookup(eb->file, handle);
		if (unlikely(!obj)) {
			err = -ENOENT;
			goto err_vma;
		}

		vma = i915_vma_instance(obj, eb->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_obj;
		}

		lut = i915_lut_handle_alloc();
		if (unlikely(!lut)) {
			err = -ENOMEM;
			goto err_obj;
		}

		err = radix_tree_insert(handles_vma, handle, vma);
		if (unlikely(err)) {
			i915_lut_handle_free(lut);
			goto err_obj;
		}

		/* transfer ref to ctx */
		if (!vma->open_count++)
			i915_vma_reopen(vma);
		list_add(&lut->obj_link, &obj->lut_list);
		list_add(&lut->ctx_link, &eb->ctx->handles_list);
		lut->handle = handle;

add_vma:
		err = eb_add_vma(eb, i, batch, vma);
		if (unlikely(err))
			goto err_vma;

		GEM_BUG_ON(vma != eb->vma[i]);
		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
	}

	eb->args->flags |= __EXEC_VALIDATED;
	return eb_reserve(eb);

err_obj:
	i915_gem_object_put(obj);
err_vma:
	eb->vma[i] = NULL;
	return err;
}

static struct i915_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
	if (eb->lut_size < 0) {
		if (handle >= -eb->lut_size)
			return NULL;
		return eb->vma[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[hash_32(handle, eb->lut_size)];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}

static void eb_release_vmas(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	for (i = 0; i < count; i++) {
		struct i915_vma *vma = eb->vma[i];
		unsigned int flags = eb->flags[i];

		if (!vma)
			break;

		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
		vma->exec_flags = NULL;
		eb->vma[i] = NULL;

		if (flags & __EXEC_OBJECT_HAS_PIN)
			__eb_unreserve_vma(vma, flags);

		if (flags & __EXEC_OBJECT_HAS_REF)
			i915_vma_put(vma);
	}
}

static void eb_reset_vmas(const struct i915_execbuffer *eb)
{
	eb_release_vmas(eb);
	if (eb->lut_size > 0)
		memset(eb->buckets, 0,
		       sizeof(struct hlist_head) << eb->lut_size);
}

static void eb_destroy(const struct i915_execbuffer *eb)
{
	GEM_BUG_ON(eb->reloc_cache.rq);

	if (eb->lut_size > 0)
		kfree(eb->buckets);
}

static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  const struct i915_vma *target)
{
	return gen8_canonical_addr((int)reloc->delta + target->node.start);
}

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->gen = INTEL_GEN(i915);
	cache->has_llc = HAS_LLC(i915);
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->has_fence = cache->gen < 4;
	cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
	cache->node.allocated = false;
	cache->rq = NULL;
	cache->rq_size = 0;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */

static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
{
	struct drm_i915_private *i915 =
		container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
	return &i915->ggtt;
}

static void reloc_gpu_flush(struct reloc_cache *cache)
{
	GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
	cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
	i915_gem_object_unpin_map(cache->rq->batch->obj);

	i915_gem_chipset_flush(cache->rq->i915);

	i915_request_add(cache->rq);
	cache->rq = NULL;
}

static void reloc_cache_reset(struct reloc_cache *cache)
{
	void *vaddr;

	if (cache->rq)
		reloc_gpu_flush(cache);

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = cache_to_ggtt(cache);

			ggtt->vm.clear_range(&ggtt->vm,
					     cache->node.start,
					     cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}

	cache->vaddr = 0;
	cache->page = -1;
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			unsigned long page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int err;

		err = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (err)
			return ERR_PTR(err);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	struct i915_ggtt *ggtt = cache_to_ggtt(cache);
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int err;

		if (use_cpu_reloc(cache, obj))
			return NULL;

		err = i915_gem_object_set_to_gtt_domain(obj, true);
		if (err)
			return ERR_PTR(err);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE |
					       PIN_NONBLOCK |
					       PIN_NONFAULT);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			err = drm_mm_insert_node_in_range
				(&ggtt->vm.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (err) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			err = i915_vma_put_fence(vma);
			if (err) {
				i915_vma_unpin(vma);
				return ERR_PTR(err);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, page),
				     offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
							 offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 unsigned long page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/*
		 * Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt to the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else {
		*addr = value;
	}
}

static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
			     struct i915_vma *vma,
			     unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *batch;
	u32 *cmd;
	int err;

	if (DBG_FORCE_RELOC == FORCE_GPU_RELOC) {
		obj = vma->obj;
		if (obj->cache_dirty & ~obj->cache_coherent)
			i915_gem_clflush_object(obj, 0);
		obj->write_domain = 0;
	}

	GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU);

	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj,
				      cache->has_llc ?
				      I915_MAP_FORCE_WB :
				      I915_MAP_FORCE_WC);
	i915_gem_object_unpin_pages(obj);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	batch = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_unmap;
	}

	err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
	if (err)
		goto err_unmap;

	rq = i915_request_alloc(eb->engine, eb->ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = i915_request_await_object(rq, vma->obj, true);
	if (err)
		goto err_request;

	err = eb->engine->emit_bb_start(rq,
					batch->node.start, PAGE_SIZE,
					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
	if (err)
		goto err_request;

	GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
	err = i915_vma_move_to_active(batch, rq, 0);
	if (err)
		goto skip_request;

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		goto skip_request;

	rq->batch = batch;
	i915_vma_unpin(batch);

	cache->rq = rq;
	cache->rq_cmd = cmd;
	cache->rq_size = 0;

	/* Return with batch mapping (cmd) still pinned */
	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(batch);
err_unmap:
	i915_gem_object_unpin_map(obj);
	return err;
}

static u32 *reloc_gpu(struct i915_execbuffer *eb,
		      struct i915_vma *vma,
		      unsigned int len)
{
	struct reloc_cache *cache = &eb->reloc_cache;
	u32 *cmd;

	if (cache->rq_size > PAGE_SIZE / sizeof(u32) - (len + 1))
		reloc_gpu_flush(cache);

	if (unlikely(!cache->rq)) {
		int err;

		/* If we need to copy for the cmdparser, we will stall anyway */
		if (eb_use_cmdparser(eb))
			return ERR_PTR(-EWOULDBLOCK);

		if (!intel_engine_can_store_dword(eb->engine))
			return ERR_PTR(-ENODEV);

		err = __reloc_gpu_alloc(eb, vma, len);
		if (unlikely(err))
			return ERR_PTR(err);
	}

	cmd = cache->rq_cmd + cache->rq_size;
	cache->rq_size += len;

	return cmd;
}

static u64
relocate_entry(struct i915_vma *vma,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct i915_execbuffer *eb,
	       const struct i915_vma *target)
{
	u64 offset = reloc->offset;
	u64 target_offset = relocation_target(reloc, target);
	bool wide = eb->reloc_cache.use_64bit_reloc;
	void *vaddr;

	if (!eb->reloc_cache.vaddr &&
	    (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
	     !reservation_object_test_signaled_rcu(vma->resv, true))) {
		const unsigned int gen = eb->reloc_cache.gen;
		unsigned int len;
		u32 *batch;
		u64 addr;

		if (wide)
			len = offset & 7 ? 8 : 5;
		else if (gen >= 4)
			len = 4;
		else
			len = 3;

		batch = reloc_gpu(eb, vma, len);
		if (IS_ERR(batch))
			goto repeat;

		addr = gen8_canonical_addr(vma->node.start + offset);
		if (wide) {
			if (offset & 7) {
				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);

				addr = gen8_canonical_addr(addr + 4);

				*batch++ = MI_STORE_DWORD_IMM_GEN4;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = upper_32_bits(target_offset);
			} else {
				*batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
				*batch++ = lower_32_bits(addr);
				*batch++ = upper_32_bits(addr);
				*batch++ = lower_32_bits(target_offset);
				*batch++ = upper_32_bits(target_offset);
			}
		} else if (gen >= 6) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else if (gen >= 4) {
			*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
			*batch++ = 0;
			*batch++ = addr;
			*batch++ = target_offset;
		} else {
			*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
			*batch++ = addr;
			*batch++ = target_offset;
		}

		goto out;
	}

repeat:
	vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			eb->reloc_cache.vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

out:
	return target->node.start | UPDATE;
}

static u64
eb_relocate_entry(struct i915_execbuffer *eb,
		  struct i915_vma *vma,
		  const struct drm_i915_gem_relocation_entry *reloc)
{
	struct i915_vma *target;
	int err;

	/* we already hold a reference to all valid objects */
	target = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(!target))
		return -ENOENT;

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "target %d offset %d "
			  "read %08x write %08x",
			  reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	if (reloc->write_domain) {
		*target->exec_flags |= EXEC_OBJECT_WRITE;

		/*
		 * Sandybridge PPGTT errata: We need a global gtt mapping
		 * for MI and pipe_control writes because the gpu doesn't
		 * properly redirect them through the ppgtt for non_secure
		 * batchbuffers.
		 */
		if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
		    IS_GEN(eb->i915, 6)) {
			err = i915_vma_bind(target, target->obj->cache_level,
					    PIN_GLOBAL);
			if (WARN_ONCE(err,
				      "Unexpected failure to bind target VMA!"))
				return err;
		}
	}

	/*
	 * If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (!DBG_FORCE_RELOC &&
	    gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "target %d offset %d size %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset,
			  (int)vma->size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "target %d offset %d.\n",
			  reloc->target_handle,
			  (int)reloc->offset);
		return -EINVAL;
	}

	/*
	 * If we write into the object, we need to force the synchronisation
	 * barrier, either with an asynchronous clflush or if we executed the
	 * patching using the GPU (though that should be serialised by the
	 * timeline). To be completely sure, and since we are required to
	 * do relocations we are already stalling, disable the user's opt
	 * out of our synchronisation.
	 */
	*vma->exec_flags &= ~EXEC_OBJECT_ASYNC;

	/* and update the user's relocation entry */
	return relocate_entry(vma, reloc, eb, target);
}

static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *urelocs;
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	unsigned int remain;

	urelocs = u64_to_user_ptr(entry->relocs_ptr);
	remain = entry->relocation_count;
	if (unlikely(remain > N_RELOC(ULONG_MAX)))
		return -EINVAL;

	/*
	 * We must check that the entire relocation array is safe
	 * to read. However, if the array is not writable the user loses
	 * the updated relocation values.
	 */
	if (unlikely(!access_ok(urelocs, remain * sizeof(*urelocs))))
		return -EFAULT;

	do {
		struct drm_i915_gem_relocation_entry *r = stack;
		unsigned int count =
			min_t(unsigned int, remain, ARRAY_SIZE(stack));
		unsigned int copied;

		/*
		 * This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. For in such a
		 * case, the page fault handler would call i915_gem_fault()
		 * and we would try to acquire the struct mutex again.
		 * Obviously this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
		pagefault_enable();
		if (unlikely(copied)) {
			remain = -EFAULT;
			goto out;
		}

		remain -= count;
		do {
			u64 offset = eb_relocate_entry(eb, vma, r);

			if (likely(offset == 0)) {
			} else if ((s64)offset < 0) {
				remain = (int)offset;
				goto out;
			} else {
				/*
				 * Note that reporting an error now
				 * leaves everything in an inconsistent
				 * state as we have *already* changed
				 * the relocation value inside the
				 * object. As we have not changed the
				 * reloc.presumed_offset or will not
				 * change the execobject.offset, on the
				 * call we may not rewrite the value
				 * inside the object, leaving it
				 * dangling and causing a GPU hang. Unless
				 * userspace dynamically rebuilds the
				 * relocations on each execbuf rather than
				 * presume a static tree.
				 *
				 * We did previously check if the relocations
				 * were writable (access_ok), an error now
				 * would be a strange race with mprotect,
				 * having already demonstrated that we
				 * can read from this userspace address.
				 */
				offset = gen8_canonical_addr(offset & ~UPDATE);
				if (unlikely(__put_user(offset,
							&urelocs[r - stack].presumed_offset))) {
					remain = -EFAULT;
					goto out;
				}
			}
		} while (r++, --count);
		urelocs += ARRAY_SIZE(stack);
	} while (remain);
out:
	reloc_cache_reset(&eb->reloc_cache);
	return remain;
}

static int
eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
{
	const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
	struct drm_i915_gem_relocation_entry *relocs =
		u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
	unsigned int i;
	int err;

	for (i = 0; i < entry->relocation_count; i++) {
		u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);

		if ((s64)offset < 0) {
			err = (int)offset;
			goto err;
		}
	}
	err = 0;
err:
	reloc_cache_reset(&eb->reloc_cache);
	return err;
}

static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
{
	const char __user *addr, *end;
	unsigned long size;
	char __maybe_unused c;

	size = entry->relocation_count;
	if (size == 0)
		return 0;

	if (size > N_RELOC(ULONG_MAX))
		return -EINVAL;

	addr = u64_to_user_ptr(entry->relocs_ptr);
	size *= sizeof(struct drm_i915_gem_relocation_entry);
	if (!access_ok(addr, size))
		return -EFAULT;

	end = addr + size;
	for (; addr < end; addr += PAGE_SIZE) {
		int err = __get_user(c, addr);
		if (err)
			return err;
	}
	return __get_user(c, end - 1);
}

static int eb_copy_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		const unsigned int nreloc = eb->exec[i].relocation_count;
		struct drm_i915_gem_relocation_entry __user *urelocs;
		struct drm_i915_gem_relocation_entry *relocs;
		unsigned long size;
		unsigned long copied;

		if (nreloc == 0)
			continue;

		err = check_relocations(&eb->exec[i]);
		if (err)
			goto err;

		urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
		size = nreloc * sizeof(*relocs);

		relocs = kvmalloc_array(size, 1, GFP_KERNEL);
		if (!relocs) {
			err = -ENOMEM;
			goto err;
		}

		/* copy_from_user is limited to < 4GiB */
		copied = 0;
		do {
			unsigned int len =
				min_t(u64, BIT_ULL(31), size - copied);

			if (__copy_from_user((char *)relocs + copied,
					     (char __user *)urelocs + copied,
					     len)) {
				kvfree(relocs);
				err = -EFAULT;
				goto err;
			}

			copied += len;
		} while (copied < size);

		/*
		 * As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		if (!user_access_begin(urelocs, size))
			goto end_user;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,
					end_user);
end_user:
		user_access_end();

		eb->exec[i].relocs_ptr = (uintptr_t)relocs;
	}

	return 0;

err:
	while (i--) {
		struct drm_i915_gem_relocation_entry *relocs =
			u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
		if (eb->exec[i].relocation_count)
			kvfree(relocs);
	}
	return err;
}

static int eb_prefault_relocations(const struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;

	if (unlikely(i915_modparams.prefault_disable))
		return 0;

	for (i = 0; i < count; i++) {
		int err;

		err = check_relocations(&eb->exec[i]);
		if (err)
			return err;
	}

	return 0;
}

static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
{
	struct drm_device *dev = &eb->i915->drm;
	bool have_copy = false;
	struct i915_vma *vma;
	int err = 0;

repeat:
	if (signal_pending(current)) {
		err = -ERESTARTSYS;
		goto out;
	}

	/* We may process another execbuffer during the unlock... */
	eb_reset_vmas(eb);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * We take 3 passes through the slowpath.
	 *
	 * 1 - we try to just prefault all the user relocation entries and
	 * then attempt to reuse the atomic pagefault disabled fast path again.
	 *
	 * 2 - we copy the user entries to a local buffer here outside of the
	 * lock and allow ourselves to wait upon any rendering before
	 * relocations.
	 *
	 * 3 - we already have a local copy of the relocation entries, but
	 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
	 */
	if (!err) {
		err = eb_prefault_relocations(eb);
	} else if (!have_copy) {
		err = eb_copy_relocations(eb);
		have_copy = err == 0;
	} else {
		cond_resched();
		err = 0;
	}
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* A frequent cause for EAGAIN are currently unavailable client pages */
	flush_workqueue(eb->i915->mm.userptr_wq);

	err = i915_mutex_lock_interruptible(dev);
	if (err) {
		mutex_lock(&dev->struct_mutex);
		goto out;
	}

	/* reacquire the objects */
	err = eb_lookup_vmas(eb);
	if (err)
		goto err;

	GEM_BUG_ON(!eb->batch);

	list_for_each_entry(vma, &eb->relocs, reloc_link) {
		if (!have_copy) {
			pagefault_disable();
			err = eb_relocate_vma(eb, vma);
			pagefault_enable();
			if (err)
				goto repeat;
		} else {
			err = eb_relocate_vma_slow(eb, vma);
			if (err)
				goto err;
		}
	}

	/*
	 * Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	if (err == -EAGAIN)
		goto repeat;

out:
	if (have_copy) {
		const unsigned int count = eb->buffer_count;
		unsigned int i;

		for (i = 0; i < count; i++) {
			const struct drm_i915_gem_exec_object2 *entry =
				&eb->exec[i];
			struct drm_i915_gem_relocation_entry *relocs;

			if (!entry->relocation_count)
				continue;

			relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
			kvfree(relocs);
		}
	}

	return err;
}

static int eb_relocate(struct i915_execbuffer *eb)
{
	if (eb_lookup_vmas(eb))
		goto slow;

	/* The objects are in their final locations, apply the relocations. */
	if (eb->args->flags & __EXEC_HAS_RELOC) {
		struct i915_vma *vma;

		list_for_each_entry(vma, &eb->relocs, reloc_link) {
			if (eb_relocate_vma(eb, vma))
				goto slow;
		}
	}

	return 0;

slow:
	return eb_relocate_slow(eb);
}

static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
	const unsigned int count = eb->buffer_count;
	unsigned int i;
	int err;

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];
		struct drm_i915_gem_object *obj = vma->obj;

		if (flags & EXEC_OBJECT_CAPTURE) {
			struct i915_capture_list *capture;

			capture = kmalloc(sizeof(*capture), GFP_KERNEL);
			if (unlikely(!capture))
				return -ENOMEM;

			capture->next = eb->request->capture_list;
			capture->vma = eb->vma[i];
			eb->request->capture_list = capture;
		}

		/*
		 * If the GPU is not _reading_ through the CPU cache, we need
		 * to make sure that any writes (both previous GPU writes from
		 * before a change in snooping levels and normal CPU writes)
		 * caught in that cache are flushed to main memory.
		 *
		 * We want to say
		 *   obj->cache_dirty &&
		 *   !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		 * but gcc's optimiser doesn't handle that as well and emits
		 * two jumps instead of one. Maybe one day...
		 */
		if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
			if (i915_gem_clflush_object(obj, 0))
				flags &= ~EXEC_OBJECT_ASYNC;
		}

		if (flags & EXEC_OBJECT_ASYNC)
			continue;

		err = i915_request_await_object
			(eb->request, obj, flags & EXEC_OBJECT_WRITE);
		if (err)
			return err;
	}

	for (i = 0; i < count; i++) {
		unsigned int flags = eb->flags[i];
		struct i915_vma *vma = eb->vma[i];

		err = i915_vma_move_to_active(vma, eb->request, flags);
		if (unlikely(err)) {
			i915_request_skip(eb->request, err);
			return err;
		}

		__eb_unreserve_vma(vma, flags);
		vma->exec_flags = NULL;

		if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
			i915_vma_put(vma);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(eb->i915);

	return 0;
}

static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
		if (exec->num_cliprects || exec->cliprects_ptr)
			return false;
	}

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
{
	u32 *cs;
	int i;

	if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(rq, 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(4);
	for (i = 0; i < 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int err;

	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
						   PAGE_ALIGN(eb->batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	err = intel_engine_cmd_parser(eb->engine,
				      eb->batch->obj,
				      shadow_batch_obj,
				      eb->batch_start_offset,
				      eb->batch_len,
				      is_master);
	if (err) {
		if (err == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(err);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	eb->vma[eb->buffer_count] = i915_vma_get(vma);
	eb->flags[eb->buffer_count] =
		__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
	vma->exec_flags = &eb->flags[eb->buffer_count];
	eb->buffer_count++;

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static void
add_to_client(struct i915_request *rq, struct drm_file *file)
{
	rq->file_priv = file->driver_priv;
	list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
}

static int eb_submit(struct i915_execbuffer *eb)
{
	int err;

	err = eb_move_to_gpu(eb);
	if (err)
		return err;

	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
		err = i915_reset_gen7_sol_offsets(eb->request);
		if (err)
			return err;
	}

	/*
	 * After we completed waiting for other engines (using HW semaphores)
	 * then we can signal that this request/batch is ready to run. This
	 * allows us to determine if the batch is still waiting on the GPU
	 * or actually running by checking the breadcrumb.
	 */
	if (eb->engine->emit_init_breadcrumb) {
		err = eb->engine->emit_init_breadcrumb(eb->request);
		if (err)
			return err;
	}

	err = eb->engine->emit_bb_start(eb->request,
					eb->batch->node.start +
					eb->batch_start_offset,
					eb->batch_len,
					eb->batch_flags);
	if (err)
		return err;

	return 0;
}

/*
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT] = RCS0,
	[I915_EXEC_RENDER] = RCS0,
	[I915_EXEC_BLT] = BCS0,
	[I915_EXEC_BSD] = VCS0,
	[I915_EXEC_VEBOX] = VECS0
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

static void
__free_fence_array(struct drm_syncobj **fences, unsigned int n)
{
	while (n--)
		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
	kvfree(fences);
}

static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}

static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}
2220 await_fence_array(struct i915_execbuffer
*eb
,
2221 struct drm_syncobj
**fences
)
2223 const unsigned int nfences
= eb
->args
->num_cliprects
;
2227 for (n
= 0; n
< nfences
; n
++) {
2228 struct drm_syncobj
*syncobj
;
2229 struct dma_fence
*fence
;
2232 syncobj
= ptr_unpack_bits(fences
[n
], &flags
, 2);
2233 if (!(flags
& I915_EXEC_FENCE_WAIT
))
2236 fence
= drm_syncobj_fence_get(syncobj
);
2240 err
= i915_request_await_dma_fence(eb
->request
, fence
);
2241 dma_fence_put(fence
);

static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}
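
/*
 * Both loops above lean on pointer packing: a drm_syncobj comes from
 * kmalloc() and so is at least ARCH_KMALLOC_MINALIGN aligned, leaving its
 * two low pointer bits free to carry the WAIT/SIGNAL flags (enforced by the
 * BUILD_BUG_ON in get_fence_array()). A minimal sketch of what
 * ptr_pack_bits()/ptr_unpack_bits() boil down to, ignoring type-safety:
 *
 *	packed  = (void *)((unsigned long)syncobj | (flags & 3));
 *	flags   = (unsigned long)packed & 3;
 *	syncobj = (struct drm_syncobj *)((unsigned long)packed & ~3ul);
 */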

static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	intel_wakeref_t wakeref;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_in_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	eb.engine = eb_select_engine(eb.i915, file, args);
	if (!eb.engine) {
		err = -EINVAL;
		goto err_engine;
	}

	/*
	 * Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	wakeref = intel_runtime_pm_get(eb.i915);

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_rpm;

	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
	if (unlikely(err))
		goto err_unlock;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_alloc(eb.engine, eb.ctx);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	i915_request_add(eb.request);
	add_to_client(eb.request, file);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(eb.i915, wakeref);
err_engine:
	i915_gem_context_put(eb.ctx);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}
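
/*
 * Illustrative userspace sketch (not driver code) of the rsvd2 plumbing
 * handled above: the input sync_file fd is taken from the low 32 bits, and
 * with I915_EXEC_FENCE_OUT a new fd is returned in the high 32 bits. in_fd
 * is assumed to be a sync_file fd from a previous submission.
 *
 *	eb2.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 *	eb2.rsvd2 = (__u32)in_fd;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb2);
 *	int out_fd = (int)(eb2.rsvd2 >> 32);
 */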

static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
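
/*
 * Worked example of the bound, assuming a typical 64-bit build:
 * eb_element_size() = 56 (exec_object2) + 8 (vma pointer) + 4 (flags)
 * = 68 bytes, so check_buffer_count() accepts at most
 * min(INT_MAX, SIZE_MAX / 68 - 1) buffers; the "- 1" leaves room for the
 * extra command-parser slot that the kvmalloc_array() callers below add.
 */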

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}

	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}
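
/*
 * Note on the write-back above: offsets above bit 47 must be presented in
 * "canonical" sign-extended form. A sketch of the transform that
 * gen8_canonical_addr() applies (defined elsewhere in the driver):
 *
 *	addr = sign_extend64(addr, 47);
 *
 * The legacy path then masks with PIN_OFFSET_MASK again so that old
 * userspace, which predates canonical addressing, keeps seeing a
 * zero-extended offset.
 */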

int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list,
				       count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}
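
/*
 * End-to-end illustrative sketch (not driver code, error handling elided)
 * of driving this ioctl from userspace; bo_handle, batch_bytes and the
 * libdrm drmIoctl() wrapper are assumed. Absent I915_EXEC_BATCH_FIRST, the
 * last object in the array is treated as the batch buffer.
 *
 *	struct drm_i915_gem_exec_object2 obj = { .handle = bo_handle };
 *	struct drm_i915_gem_execbuffer2 eb2 = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb2);
 */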