/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/dma-fence-array.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
struct execute_cb {
	struct list_head link;
	struct irq_work work;
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
	struct i915_request *signal;
};

static struct i915_global_request {
	struct i915_global base;
	struct kmem_cache *slab_requests;
	struct kmem_cache *slab_execute_cbs;
} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return dev_name(to_request(fence)->i915->drm.dev);
}
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	const struct i915_gem_context *ctx;

	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	ctx = i915_request_gem_context(to_request(fence));
	if (!ctx)
		return "[" DRIVER_NAME "]";

	return ctx->name;
}
static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}
static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}
static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence),
				 interruptible | I915_WAIT_PRIORITY,
				 timeout);
}
static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);

	kmem_cache_free(global.slab_requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
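
/*
 * Illustrative note (editorial addition, not upstream code): because every
 * request embeds a struct dma_fence driven by i915_fence_ops above, generic
 * dma-fence consumers can synchronise with a request without knowing any
 * i915 internals. A minimal sketch, assuming the caller already holds a
 * reference on a request 'rq':
 *
 *	long ret = dma_fence_wait_timeout(&rq->fence, true, HZ);
 *
 * A return of 0 means the one second timeout expired, a negative value means
 * the interruptible wait was aborted by a signal, and a positive value is the
 * remaining jiffies. The call is routed through i915_fence_wait() and hence
 * i915_request_wait() with I915_WAIT_PRIORITY set.
 */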
static void irq_execute_cb(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	i915_sw_fence_complete(cb->fence);
	kmem_cache_free(global.slab_execute_cbs, cb);
}
static void irq_execute_cb_hook(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	cb->hook(container_of(cb->fence, struct i915_request, submit),
		 &cb->signal->fence);
	i915_request_put(cb->signal);

	irq_execute_cb(wrk);
}
static void __notify_execute_cb(struct i915_request *rq)
{
	struct execute_cb *cb;

	lockdep_assert_held(&rq->lock);

	if (list_empty(&rq->execute_cb))
		return;

	list_for_each_entry(cb, &rq->execute_cb, link)
		irq_work_queue(&cb->work);

	/*
	 * XXX Rollback on __i915_request_unsubmit()
	 *
	 * In the future, perhaps when we have an active time-slicing scheduler,
	 * it will be interesting to unsubmit parallel execution and remove
	 * busywaits from the GPU until their master is restarted. This is
	 * quite hairy, we have to carefully rollback the fence and do a
	 * preempt-to-idle cycle on the target engine, all the while the
	 * master execute_cb may refire.
	 */
	INIT_LIST_HEAD(&rq->execute_cb);
}
static void
remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	if (!READ_ONCE(request->file_priv))
		return;

	rcu_read_lock();
	file_priv = xchg(&request->file_priv, NULL);
	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		list_del(&request->client_link);
		spin_unlock(&file_priv->mm.lock);
	}
	rcu_read_unlock();
}
static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = fetch_and_zero(&request->capture_list);
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}
static void __i915_request_fill(struct i915_request *rq, u8 val)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, val, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, val, rq->postfix - head);
}
static void remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);
	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
	spin_unlock_irq(&locked->active.lock);
}
bool i915_request_retire(struct i915_request *rq)
{
	if (!i915_request_completed(rq))
		return false;

	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
	trace_i915_request_retire(rq);

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&rq->link,
				  &i915_request_timeline(rq)->requests));
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		/* Poison before we release our space in the ring */
		__i915_request_fill(rq, POISON_FREE);
	rq->ring->head = rq->postfix;

	/*
	 * We only loosely track inflight requests across preemption,
	 * and so we may find ourselves attempting to retire a _completed_
	 * request that we have removed from the HW and put back on a run
	 * queue.
	 */
	remove_from_engine(rq);

	spin_lock_irq(&rq->lock);
	i915_request_mark_complete(rq);
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_cancel_breadcrumb(rq);
	if (i915_request_has_waitboost(rq)) {
		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
		atomic_dec(&rq->engine->gt->rps.num_waiters);
	}
	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
		__notify_execute_cb(rq);
	}
	GEM_BUG_ON(!list_empty(&rq->execute_cb));
	spin_unlock_irq(&rq->lock);

	remove_from_client(rq);
	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */

	intel_context_exit(rq->context);
	intel_context_unpin(rq->context);

	free_capture_list(rq);
	i915_sched_node_fini(&rq->sched);
	i915_request_put(rq);

	return true;
}
void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *tmp;

	GEM_BUG_ON(!i915_request_completed(rq));

	do {
		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
	} while (i915_request_retire(tmp) && tmp != rq);
}
static int
__await_execution(struct i915_request *rq,
		  struct i915_request *signal,
		  void (*hook)(struct i915_request *rq,
			       struct dma_fence *signal),
		  gfp_t gfp)
{
	struct execute_cb *cb;

	if (i915_request_is_active(signal)) {
		if (hook)
			hook(rq, &signal->fence);
		return 0;
	}

	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
	if (!cb)
		return -ENOMEM;

	cb->fence = &rq->submit;
	i915_sw_fence_await(cb->fence);
	init_irq_work(&cb->work, irq_execute_cb);

	if (hook) {
		cb->hook = hook;
		cb->signal = i915_request_get(signal);
		cb->work.func = irq_execute_cb_hook;
	}

	spin_lock_irq(&signal->lock);
	if (i915_request_is_active(signal)) {
		if (hook) {
			hook(rq, &signal->fence);
			i915_request_put(signal);
		}
		i915_sw_fence_complete(cb->fence);
		kmem_cache_free(global.slab_execute_cbs, cb);
	} else {
		list_add_tail(&cb->link, &signal->execute_cb);
	}
	spin_unlock_irq(&signal->lock);

	/* Copy across semaphore status as we need the same behaviour */
	rq->sched.flags |= signal->sched.flags;
	return 0;
}
static bool fatal_error(int error)
{
	switch (error) {
	case 0: /* not an error! */
	case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
	case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
		return false;
	default:
		return true;
	}
}
void __i915_request_skip(struct i915_request *rq)
{
	GEM_BUG_ON(!fatal_error(rq->fence.error));

	if (rq->infix == rq->postfix)
		return;

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	__i915_request_fill(rq, 0);
	rq->infix = rq->postfix;
}
void i915_request_set_error_once(struct i915_request *rq, int error)
{
	int old;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));

	if (i915_request_signaled(rq))
		return;

	old = READ_ONCE(rq->fence.error);
	do {
		if (fatal_error(old))
			return;
	} while (!try_cmpxchg(&rq->fence.error, &old, error));
}
bool __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	bool result = false;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * With the advent of preempt-to-busy, we frequently encounter
	 * requests that we have unsubmitted from HW, but left running
	 * until the next ack and so have completed in the meantime. On
	 * resubmission of that completed request, we can skip
	 * updating the payload, and execlists can even skip submitting
	 * the request.
	 *
	 * We must remove the request from the caller's priority queue,
	 * and the caller must only call us when the request is in their
	 * priority queue, under the active.lock. This ensures that the
	 * request has *not* yet been retired and we can safely move
	 * the request into the engine->active.list where it will be
	 * dropped upon retiring. (Otherwise if resubmit a *retired*
	 * request, this would be a horrible use-after-free.)
	 */
	if (i915_request_completed(request))
		goto xfer;

	if (unlikely(intel_context_is_banned(request->context)))
		i915_request_set_error_once(request, -EIO);
	if (unlikely(fatal_error(request->fence.error)))
		__i915_request_skip(request);

	/*
	 * Are we using semaphores when the gpu is already saturated?
	 *
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * If we installed a semaphore on this request and we only submit
	 * the request after the signaler completed, that indicates the
	 * system is overloaded and using semaphores at this time only
	 * increases the amount of work we are doing. If so, we disable
	 * further use of semaphores until we are idle again, whence we
	 * optimistically try again.
	 */
	if (request->sched.semaphores &&
	    i915_sw_fence_signaled(&request->semaphore))
		engine->saturated |= request->sched.semaphores;

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	trace_i915_request_execute(request);
	result = true;

xfer:	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
		list_move_tail(&request->sched.link, &engine->active.requests);
		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
	}

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_signal_breadcrumbs(engine);

	__notify_execute_cb(request);

	spin_unlock(&request->lock);

	return result;
}
void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);

	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

	spin_unlock(&request->lock);

	/* We've already spun, don't charge on resubmitting. */
	if (request->sched.semaphores && i915_request_started(request)) {
		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
		request->sched.semaphores = 0;
	}

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno to the one they went to sleep on.
	 */
}
void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);

		if (unlikely(fence->error))
			i915_request_set_error_once(request, fence->error);

		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}
static void irq_semaphore_cb(struct irq_work *wrk)
{
	struct i915_request *rq =
		container_of(wrk, typeof(*rq), semaphore_work);

	i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
	i915_request_put(rq);
}
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
			i915_request_get(rq);
			init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
			irq_work_queue(&rq->semaphore_work);
		}
		break;

	case FENCE_FREE:
		i915_request_put(rq);
		break;
	}

	return NOTIFY_DONE;
}
static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}
static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
	struct i915_request *rq;

	if (list_empty(&tl->requests))
		goto out;

	if (!gfpflags_allow_blocking(gfp))
		goto out;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	i915_request_retire(rq);

	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (rq)
		return rq;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&tl->requests, typeof(*rq), link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	retire_requests(tl);

out:
	return kmem_cache_alloc(global.slab_requests, gfp);
}
static void __i915_request_ctor(void *arg)
{
	struct i915_request *rq = arg;

	spin_lock_init(&rq->lock);
	i915_sched_node_init(&rq->sched);
	i915_sw_fence_init(&rq->submit, submit_notify);
	i915_sw_fence_init(&rq->semaphore, semaphore_notify);

	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);

	rq->file_priv = NULL;
	rq->capture_list = NULL;

	INIT_LIST_HEAD(&rq->execute_cb);
}
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;
	u32 seqno;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Check that the caller provided an already pinned context */
	__intel_context_pin(ce);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_active_request_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we change chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to and matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = request_alloc_slow(tl, gfp);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	rq->i915 = ce->engine->i915;
	rq->context = ce;
	rq->engine = ce->engine;
	rq->ring = ce->ring;
	rq->execution_mask = ce->engine->mask;

	kref_init(&rq->fence.refcount);
	rq->fence.flags = 0;
	rq->fence.error = 0;
	INIT_LIST_HEAD(&rq->fence.cb_list);

	ret = intel_timeline_get_seqno(tl, rq, &seqno);
	if (ret)
		goto err_free;

	rq->fence.context = tl->fence_context;
	rq->fence.seqno = seqno;

	RCU_INIT_POINTER(rq->timeline, tl);
	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
	rq->hwsp_seqno = tl->hwsp_seqno;
	GEM_BUG_ON(i915_request_completed(rq));

	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

	/* We bump the ref for the fence chain */
	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

	i915_sched_node_reinit(&rq->sched);

	/* No zalloc, everything must be cleared after use */
	GEM_BUG_ON(rq->file_priv);
	GEM_BUG_ON(rq->capture_list);
	GEM_BUG_ON(!list_empty(&rq->execute_cb));

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space =
		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = rq->engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	intel_context_mark_active(ce);
	list_add_tail_rcu(&rq->link, &tl->requests);

	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
	kmem_cache_free(global.slab_requests, rq);
err_unreserve:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}
struct i915_request *
i915_request_create(struct intel_context *ce)
{
	struct i915_request *rq;
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return ERR_CAST(tl);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	if (!list_is_last(&rq->link, &tl->requests))
		i915_request_retire(rq);

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_KERNEL);
	intel_context_exit(ce); /* active reference transferred to request */
	if (IS_ERR(rq))
		goto err_unlock;

	/* Check that we do not interrupt ourselves with a new request */
	rq->cookie = lockdep_pin_lock(&tl->mutex);

	return rq;

err_unlock:
	intel_context_timeline_unlock(tl);
	return rq;
}
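
/*
 * Illustrative note (editorial addition, not upstream code): a sketch of the
 * canonical construction path for callers of this API, assuming a pinned
 * intel_context 'ce' and that two dwords of user payload suffice:
 *
 *	struct i915_request *rq;
 *	u32 *cs;
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs)) {
 *		i915_request_add(rq);
 *		return PTR_ERR(cs);
 *	}
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(rq, cs);
 *
 *	i915_request_add(rq);
 *
 * The final breadcrumb cannot run out of ring space because
 * __i915_request_create() reserved that space up front.
 */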
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
	struct dma_fence *fence;
	int err;

	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
		return 0;

	if (i915_request_started(signal))
		return 0;

	fence = NULL;
	rcu_read_lock();
	spin_lock_irq(&signal->lock);
	do {
		struct list_head *pos = READ_ONCE(signal->link.prev);
		struct i915_request *prev;

		/* Confirm signal has not been retired, the link is valid */
		if (unlikely(i915_request_started(signal)))
			break;

		/* Is signal the earliest request on its timeline? */
		if (pos == &rcu_dereference(signal->timeline)->requests)
			break;

		/*
		 * Peek at the request before us in the timeline. That
		 * request will only be valid before it is retired, so
		 * after acquiring a reference to it, confirm that it is
		 * still part of the signaler's timeline.
		 */
		prev = list_entry(pos, typeof(*prev), link);
		if (!i915_request_get_rcu(prev))
			break;

		/* After the strong barrier, confirm prev is still attached */
		if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
			i915_request_put(prev);
			break;
		}

		fence = &prev->fence;
	} while (0);
	spin_unlock_irq(&signal->lock);
	rcu_read_unlock();
	if (!fence)
		return 0;

	err = 0;
	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
		err = i915_sw_fence_await_dma_fence(&rq->submit,
						    fence, 0,
						    I915_FENCE_GFP);
	dma_fence_put(fence);

	return err;
}
static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
	/*
	 * Polling a semaphore causes bus traffic, delaying other users of
	 * both the GPU and CPU. We want to limit the impact on others,
	 * while taking advantage of early submission to reduce GPU
	 * latency. Therefore we restrict ourselves to not using more
	 * than one semaphore from each source, and not using a semaphore
	 * if we have detected the engine is saturated (i.e. would not be
	 * submitted early and cause bus traffic reading an already passed
	 * seqno).
	 *
	 * See the are-we-too-late? check in __i915_request_submit().
	 */
	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
__emit_semaphore_wait(struct i915_request *to,
		      struct i915_request *from,
		      u32 seqno)
{
	const int has_token = INTEL_GEN(to->i915) >= 12;
	u32 hwsp_offset;
	int len, err;
	u32 *cs;

	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);

	/* We need to pin the signaler's HWSP until we are finished reading. */
	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
	if (err)
		return err;

	len = 4;
	if (has_token)
		len += 2;

	cs = intel_ring_begin(to, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Using greater-than-or-equal here means we have to worry
	 * about seqno wraparound. To side step that issue, we swap
	 * the timeline HWSP upon wrapping, so that everyone listening
	 * for the old (pre-wrap) values do not see the much smaller
	 * (post-wrap) values than they were expecting (and so wait
	 * forever).
	 */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_GTE_SDD) +
		has_token;
	*cs++ = seqno;
	*cs++ = hwsp_offset;
	*cs++ = 0;
	if (has_token) {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(to, cs);
	return 0;
}
static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{
	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;

	if (!intel_context_use_semaphores(to->context))
		goto await_fence;

	if (!rcu_access_pointer(from->hwsp_cacheline))
		goto await_fence;

	/* Just emit the first semaphore we see as request space is limited. */
	if (already_busywaiting(to) & mask)
		goto await_fence;

	if (i915_request_await_start(to, from) < 0)
		goto await_fence;

	/* Only submit our spinner after the signaler is running! */
	if (__await_execution(to, from, NULL, gfp))
		goto await_fence;

	if (__emit_semaphore_wait(to, from, from->fence.seqno))
		goto await_fence;

	to->sched.semaphores |= mask;
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
	return 0;

await_fence:
	return i915_sw_fence_await_dma_fence(&to->submit,
					     &from->fence, 0,
					     I915_FENCE_GFP);
}
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from)) {
		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
		return 0;
	}

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine)
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
	else
		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
	if (ret < 0)
		return ret;

	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
						    &from->fence, 0,
						    I915_FENCE_GFP);
		if (ret < 0)
			return ret;
	}

	return 0;
}
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context &&
		    intel_timeline_sync_is_later(i915_request_timeline(rq),
						 fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    fence->context ? I915_FENCE_TIMEOUT : 0,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context)
			intel_timeline_sync_set(i915_request_timeline(rq),
						fence);
	} while (--nchild);

	return 0;
}
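
/*
 * Illustrative note (editorial addition, not upstream code): the usual way an
 * external fence reaches this function is from userspace via a sync_file fd.
 * A hedged sketch, assuming a request under construction 'rq' and a file
 * descriptor 'fd' supplied by the caller:
 *
 *	struct dma_fence *in_fence = sync_file_get_fence(fd);
 *	int err;
 *
 *	if (!in_fence)
 *		return -EINVAL;
 *
 *	err = i915_request_await_dma_fence(rq, in_fence);
 *	dma_fence_put(in_fence);
 *	if (err < 0)
 *		return err;
 *
 * Foreign fences are tracked through rq->submit, while i915 fences may be
 * converted into semaphore waits or engine-local submit dependencies above.
 */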
static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
					  struct dma_fence *fence)
{
	return __intel_timeline_sync_is_later(tl,
					      fence->context,
					      fence->seqno - 1);
}

static int intel_timeline_sync_set_start(struct intel_timeline *tl,
					 const struct dma_fence *fence)
{
	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
}
static int
__i915_request_await_execution(struct i915_request *to,
			       struct i915_request *from,
			       void (*hook)(struct i915_request *rq,
					    struct dma_fence *signal))
{
	int err;

	GEM_BUG_ON(intel_context_is_barrier(from->context));

	/* Submit both requests at the same time */
	err = __await_execution(to, from, hook, I915_FENCE_GFP);
	if (err)
		return err;

	/* Squash repeated dependencies to the same timelines */
	if (intel_timeline_sync_has_start(i915_request_timeline(to),
					  &from->fence))
		return 0;

	/*
	 * Wait until the start of this request.
	 *
	 * The execution cb fires when we submit the request to HW. But in
	 * many cases this may be long before the request itself is ready to
	 * run (consider that we submit 2 requests for the same context, where
	 * the request of interest is behind an indefinite spinner). So we hook
	 * up to both to reduce our queues and keep the execution lag minimised
	 * in the worst case, though we hope that the await_start is elided.
	 */
	err = i915_request_await_start(to, from);
	if (err < 0)
		return err;

	/*
	 * Ensure both start together [after all semaphores in signal]
	 *
	 * Now that we are queued to the HW at roughly the same time (thanks
	 * to the execute cb) and are ready to run at roughly the same time
	 * (thanks to the await start), our signaler may still be indefinitely
	 * delayed by waiting on a semaphore from a remote engine. If our
	 * signaler depends on a semaphore, so indirectly do we, and we do not
	 * want to start our payload until our signaler also starts theirs.
	 * So we wait.
	 *
	 * However, there is also a second condition for which we need to wait
	 * for the precise start of the signaler. Consider that the signaler
	 * was submitted in a chain of requests following another context
	 * (with just an ordinary intra-engine fence dependency between the
	 * two). In this case the signaler is queued to HW, but not for
	 * immediate execution, and so we must wait until it reaches the
	 * active slot.
	 */
	if (intel_engine_has_semaphores(to->engine)) {
		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
		if (err < 0)
			return err;
	}

	/* Couple the dependency tree for PI on this exposed to->fence */
	if (to->engine->schedule) {
		err = i915_sched_node_add_dependency(&to->sched, &from->sched);
		if (err < 0)
			return err;
	}

	return intel_timeline_sync_set_start(i915_request_timeline(to),
					     &from->fence);
}
int
i915_request_await_execution(struct i915_request *rq,
			     struct dma_fence *fence,
			     void (*hook)(struct i915_request *rq,
					  struct dma_fence *signal))
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		/* XXX Error for signal-on-any fence arrays */

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			i915_sw_fence_set_error_once(&rq->submit, fence->error);
			continue;
		}

		/*
		 * We don't squash repeated fence dependencies here as we
		 * want to run our callback in all cases.
		 */
		if (dma_fence_is_i915(fence))
			ret = __i915_request_await_execution(rq,
							     to_request(fence),
							     hook);
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	} while (--nchild);

	return 0;
}
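
/*
 * Illustrative note (editorial addition, not upstream code): unlike
 * i915_request_await_dma_fence(), which only orders 'rq' after the signaler
 * completes, this variant orders 'rq' after the signaler has been *submitted*
 * to hardware, which is what bonded/parallel submission wants. A minimal
 * sketch with no per-fence hook, assuming 'rq' and a signaling fence 'fence':
 *
 *	int err = i915_request_await_execution(rq, fence, NULL);
 *	if (err < 0)
 *		return err;
 *
 * Passing a non-NULL hook additionally runs that callback (from irq_work
 * context via irq_execute_cb_hook()) once the signaler becomes active.
 */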
/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);

		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(obj->base.resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}
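
/*
 * Illustrative note (editorial addition, not upstream code): a sketch of the
 * usual call site, ordering a request against a buffer object before the GPU
 * touches it. 'rq', 'obj', the object's binding 'vma' and the flag
 * 'will_write' are assumed to be provided by the caller:
 *
 *	int err = i915_request_await_object(rq, obj, will_write);
 *	if (err == 0)
 *		err = i915_vma_move_to_active(vma, rq,
 *					      will_write ? EXEC_OBJECT_WRITE : 0);
 *	if (err < 0)
 *		return err;
 *
 * The second step is the companion call used elsewhere in the driver to
 * publish this request in the object's reservation, and is shown only for
 * context.
 */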
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
	struct intel_timeline *timeline = i915_request_timeline(rq);
	struct i915_request *prev;

	/*
	 * Dependency tracking and request ordering along the timeline
	 * is special cased so that we can eliminate redundant ordering
	 * operations while building the request (we know that the timeline
	 * itself is ordered, and here we guarantee it).
	 *
	 * As we know we will need to emit tracking along the timeline,
	 * we embed the hooks into our request struct -- at the cost of
	 * having to have specialised no-allocation interfaces (which will
	 * be beneficial elsewhere).
	 *
	 * A second benefit to open-coding i915_request_await_request is
	 * that we can apply a slight variant of the rules specialised
	 * for timelines that jump between engines (such as virtual engines).
	 * If we consider the case of virtual engine, we must emit a dma-fence
	 * to prevent scheduling of the second request until the first is
	 * complete (to maximise our greedy late load balancing) and this
	 * precludes optimising to use semaphores serialisation of a single
	 * timeline across engines.
	 */
	prev = to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));
	if (prev && !i915_request_completed(prev)) {
		/*
		 * The requests are supposed to be kept in order. However,
		 * we need to be wary in case the timeline->last_request
		 * is used as a barrier for external modification to this
		 * context.
		 */
		GEM_BUG_ON(prev->context == rq->context &&
			   i915_seqno_passed(prev->fence.seqno,
					     rq->fence.seqno));

		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
			i915_sw_fence_await_sw_fence(&rq->submit,
						     &prev->submit,
						     &rq->submitq);
		else
			__i915_sw_fence_await_dma_fence(&rq->submit,
							&prev->fence,
							&rq->dmaq);
		if (rq->engine->schedule)
			__i915_sched_node_add_dependency(&rq->sched,
							 &prev->sched,
							 &rq->dep,
							 0);
	}

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);

	return prev;
}
/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_ring *ring = rq->ring;
	u32 *cs;

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	GEM_BUG_ON(rq->reserved_space > ring->space);
	rq->reserved_space = 0;
	rq->emitted_jiffies = jiffies;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	rq->postfix = intel_ring_offset(rq, cs);

	return __i915_request_add_to_timeline(rq);
}
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{
	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (attr && rq->engine->schedule)
		rq->engine->schedule(rq, attr);
	i915_sw_fence_commit(&rq->semaphore);
	i915_sw_fence_commit(&rq->submit);
}
void i915_request_add(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_gem_context *ctx;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);
	__i915_request_commit(rq);

	/* XXX placeholder for selftests */
	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		attr = ctx->sched;
	rcu_read_unlock();

	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
		attr.priority |= I915_PRIORITY_NOSEMAPHORE;
	if (list_empty(&rq->sched.signalers_list))
		attr.priority |= I915_PRIORITY_WAIT;

	local_bh_disable();
	__i915_request_queue(rq, &attr);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	mutex_unlock(&tl->mutex);
}
static unsigned long local_clock_ns(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock();
	put_cpu();

	return t;
}
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_ns(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}
static bool __i915_spin_request(const struct i915_request * const rq, int state)
{
	unsigned long timeout_ns;
	unsigned int cpu;

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the context and so we can
	 * tell if the request has been started. If the request is not even
	 * running yet, it is a fair assumption that it will not complete
	 * within our relatively short timeout.
	 */
	if (!i915_request_is_running(rq))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
	timeout_ns += local_clock_ns(&cpu);
	do {
		if (i915_request_completed(rq))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_ns, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}
struct request_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};
static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct request_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(wait->tsk);
}
/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct request_wait wait;

	might_sleep();
	GEM_BUG_ON(timeout < 0);

	if (dma_fence_is_signaled(&rq->fence))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	/*
	 * We must never wait on the GPU while holding a lock as we
	 * may need to perform a GPU reset. So while we don't need to
	 * serialise wait/reset with an explicit lock, we do want
	 * lockdep to detect potential dependency cycles.
	 */
	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);

	/*
	 * Optimistic spin before touching IRQs.
	 *
	 * We may use a rather large value here to offset the penalty of
	 * switching away from the active task. Frequently, the client will
	 * wait upon an old swapbuffer to throttle itself to remain within a
	 * frame of the gpu. If the client is running in lockstep with the gpu,
	 * then it should not be waiting long at all, and a sleep now will incur
	 * extra scheduler latency in producing the next frame. To try to
	 * avoid adding the cost of enabling/disabling the interrupt to the
	 * short wait, we first spin to see if the request would have completed
	 * in the time taken to setup the interrupt.
	 *
	 * We need upto 5us to enable the irq, and upto 20us to hide the
	 * scheduler latency of a context switch, ignoring the secondary
	 * impacts from a context switch such as cache eviction.
	 *
	 * The scheme used for low-latency IO is called "hybrid interrupt
	 * polling". The suggestion there is to sleep until just before you
	 * expect to be woken by the device interrupt and then poll for its
	 * completion. That requires having a good predictor for the request
	 * duration, which we currently lack.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
	    __i915_spin_request(rq, state)) {
		dma_fence_signal(&rq->fence);
		goto out;
	}

	/*
	 * This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we sleep. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery).
	 */
	if (flags & I915_WAIT_PRIORITY) {
		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
			intel_rps_boost(rq);
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
	}

	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out;

	for (;;) {
		set_current_state(state);

		if (i915_request_completed(rq)) {
			dma_fence_signal(&rq->fence);
			break;
		}

		intel_engine_flush_submission(rq->engine);

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(&rq->fence, &wait.cb);

out:
	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
	trace_i915_request_wait_end(rq);
	return timeout;
}
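
/*
 * Illustrative note (editorial addition, not upstream code): typical callers
 * either poll with a bounded timeout or block until completion. A hedged
 * sketch of an interruptible, unbounded wait on 'rq':
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				     MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 *
 * A negative return here is -ERESTARTSYS if a signal arrived first; a
 * non-negative return is the remaining timeout in jiffies, which with
 * MAX_SCHEDULE_TIMEOUT is only meaningful as "completed".
 */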
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
static void i915_global_request_shrink(void)
{
	kmem_cache_shrink(global.slab_execute_cbs);
	kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
	kmem_cache_destroy(global.slab_execute_cbs);
	kmem_cache_destroy(global.slab_requests);
}
static struct i915_global_request global = { {
	.shrink = i915_global_request_shrink,
	.exit = i915_global_request_exit,
} };
int __init i915_global_request_init(void)
{
	global.slab_requests =
		kmem_cache_create("i915_request",
				  sizeof(struct i915_request),
				  __alignof__(struct i915_request),
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_TYPESAFE_BY_RCU,
				  __i915_request_ctor);
	if (!global.slab_requests)
		return -ENOMEM;

	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
					     SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_execute_cbs)
		goto err_requests;

	i915_global_register(&global.base);
	return 0;

err_requests:
	kmem_cache_destroy(global.slab_requests);
	return -ENOMEM;
}