/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_gt_pm.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"
/*
 * Haswell does have the CXT_SIZE register, but it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
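/* Sanity check of the rounding above: 66944 / 4096 = 16.34 pages, hence 17. */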

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

#define MAX_MMIO_BASES 3

struct engine_info {
	unsigned int hw_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS0] = {
		.hw_id = RCS0_HW,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS0] = {
		.hw_id = BCS0_HW,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS0] = {
		.hw_id = VCS0_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS1] = {
		.hw_id = VCS1_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS0] = {
		.hw_id = VECS0_HW,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS1] = {
		.hw_id = VECS1_HW,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(gt->i915)) {
		default:
			MISSING_CASE(INTEL_GEN(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
				"gen%d CXT_SIZE = %d bytes [0x%08x]\n",
				INTEL_GEN(gt->i915), cxt_size * 64,
				cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
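
/*
 * e.g. a gen9 render context image above is GEN9_LR_CONTEXT_RENDER_SIZE,
 * i.e. 22 * 4096 = 90112 bytes, while any non-render engine on gen8+ only
 * needs GEN8_LR_CONTEXT_OTHER_SIZE (2 pages).
 */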

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
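
/*
 * Worked example of the reverse-gen lookup above: the VCS0 table is
 * { gen11, gen6, gen4 }, so on a gen9 device the first entry satisfying
 * INTEL_GEN() >= gen is { .gen = 6, .base = GEN6_BSD_RING_BASE }.
 */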

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}
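
/*
 * e.g. class repr "vcs" with instance 2 yields "vcs'2" here; the apostrophe
 * is dropped once the final uABI name is settled when the engine is
 * registered for userspace.
 */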

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (INTEL_GEN(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
		return -EINVAL;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	engine->id = id;
	engine->legacy_idx = INVALID_ENGINE;
	engine->mask = BIT(id);
	engine->i915 = i915;
	engine->gt = gt;
	engine->uncore = gt->uncore;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);

	engine->class = info->class;
	engine->instance = info->instance;
	__sprint_engine_name(engine);

	engine->props.heartbeat_interval_ms =
		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
	engine->props.max_busywait_duration_ns =
		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
	engine->props.preempt_timeout_ms =
		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
	engine->props.stop_timeout_ms =
		CONFIG_DRM_I915_STOP_TIMEOUT;
	engine->props.timeslice_duration_ms =
		CONFIG_DRM_I915_TIMESLICE_DURATION;

	/* Override to uninterruptible for OpenCL workloads. */
	if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
		engine->props.preempt_timeout_ms = 0;

	engine->context_size = intel_engine_context_size(gt, engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(i915)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	ewma__engine_latency_init(&engine->latency);
	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	gt->engine_class[info->class][info->instance] = engine;
	gt->engine[id] = engine;

	i915->engine[id] = engine;

	return 0;
}

static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (INTEL_GEN(i915) >= 11 ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((INTEL_GEN(i915) >= 11 &&
		     RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (INTEL_GEN(i915) >= 9)
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		__setup_engine_capabilities(engine);
}

/**
 * intel_engines_release() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_release(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Before we release the resources held by engine, we must be certain
	 * that the HW is no longer accessing them -- having the GPU scribble
	 * to or read from a page being used for something else causes no end
	 * of fun.
	 *
	 * The GPU should be reset by this point, but assume the worst just
	 * in case we aborted before completely initialising the engines.
	 */
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		intel_wakeref_wait_for_idle(&engine->wakeref);
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));

		if (!engine->release)
			continue;

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));

		gt->i915->engine[id] = NULL;
	}
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_device_info *device_info = mkwrite_device_info(i915);
	const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	drm_WARN_ON(&i915->drm, engine_mask == 0);
	drm_WARN_ON(&i915->drm, engine_mask &
		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(i915, i))
			continue;

		err = intel_engine_setup(gt, i);
		if (err)
			goto cleanup;

		mask |= BIT(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
		device_info->engine_mask = mask;

	RUNTIME_INFO(i915)->num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	return 0;

cleanup:
	intel_engines_free(gt);
	return err;
}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}
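
/*
 * With port_mask == 1, execlists_num_ports() above reports two submission
 * ports, matching the ELSP depth used by the execlists backend.
 */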

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_vma *vma)
{
	unsigned int flags;

	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags = PIN_MAPPABLE;
	else
		flags = PIN_HIGH;

	return i915_ggtt_pin(vma, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		drm_err(&engine->i915->drm,
			"Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
		ret = pin_ggtt_status_page(engine, vma);
		if (ret)
			goto err_unpin;
	}

	return 0;

err_unpin:
	i915_gem_object_unpin_map(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	intel_engine_init_active(engine, ENGINE_PHYSICAL);
	intel_engine_init_breadcrumbs(engine);
	intel_engine_init_execlists(engine);
	intel_engine_init_cmd_parser(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	intel_engine_pool_init(&engine->pool);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
	intel_engine_init_ctx_wa(engine);

	return 0;
}

struct measure_breadcrumb {
	struct i915_request rq;
	struct intel_ring ring;
	u32 cs[2048]; /* scratch for emit_fini_breadcrumb(); exact size here is an assumption */
};

static int measure_breadcrumb_dw(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct measure_breadcrumb *frame;
	int dw;

	GEM_BUG_ON(!engine->gt->scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.context = ce;
	rcu_assign_pointer(frame->rq.timeline, ce->timeline);

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);
	frame->rq.ring = &frame->ring;

	mutex_lock(&ce->timeline->mutex);
	spin_lock_irq(&engine->active.lock);

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	spin_unlock_irq(&engine->active.lock);
	mutex_unlock(&ce->timeline->mutex);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	kfree(frame);
	return dw;
}
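
/*
 * The dword count measured above is later cached as
 * engine->emit_fini_breadcrumb_dw by engine_init_common(), so request
 * construction can reserve exactly the ring space needed for the closing
 * breadcrumb.
 */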

void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
	INIT_LIST_HEAD(&engine->active.requests);
	INIT_LIST_HEAD(&engine->active.hold);

	spin_lock_init(&engine->active.lock);
	lockdep_set_subclass(&engine->active.lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&engine->active.lock.dep_map);
	lock_map_release(&engine->active.lock.dep_map);
	local_irq_enable();
#endif
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class(&ce->timeline->mutex, &kernel);

	return ce;
}
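
/*
 * The kernel context created above is perma-pinned so it is always ready
 * for engine-internal work (e.g. emitting barriers or parking the engine)
 * without requiring any further allocation or pinning.
 */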

/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context at driver load time.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;

	return 0;

err_context:
	intel_context_put(ce);
	return ret;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (HAS_EXECLISTS(gt->i915))
		setup = intel_execlists_submission_setup;
	else
		setup = intel_ring_submission_setup;

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err)
			return err;

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->active.requests));
	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */

	cleanup_status_page(engine);

	intel_engine_fini_retire(engine);
	intel_engine_pool_fini(&engine->pool);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->kernel_context) {
		intel_context_unpin(engine->kernel_context);
		intel_context_put(engine->kernel_context);
	}
	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_resume(struct intel_engine_cs *engine)
{
	intel_engine_apply_workarounds(engine);
	intel_engine_apply_whitelist(engine);

	return engine->resume(engine);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	u64 acthd;

	if (INTEL_GEN(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (INTEL_GEN(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (INTEL_GEN(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}

static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
		return 0;

	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So they should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
	return READ_ONCE(engine->props.stop_timeout_ms);
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(engine->i915) < 3)
		return -ENODEV;

	ENGINE_TRACE(engine, "\n");

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(uncore,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, stop_timeout(engine),
					 NULL)) {
		ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);

	return err;
}
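
/*
 * RING_MI_MODE is a masked register: _MASKED_BIT_ENABLE(STOP_RING) places
 * STOP_RING in the low 16 bits and repeats it in the high 16-bit write
 * mask, so such writes modify only that single bit.
 */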

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	ENGINE_TRACE(engine, "\n");

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

static u32
read_subslice_reg(const struct intel_engine_cs *engine,
		  int slice, int subslice, i915_reg_t reg)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	mcr &= ~mcr_mask;
	mcr |= mcr_ss;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	val = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_mask;
	mcr |= old_mcr & mcr_mask;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}
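
/*
 * The helper above follows the usual MCR steering dance: save
 * GEN8_MCR_SELECTOR, steer it at the target slice/subslice, read the
 * register, then restore the selector -- all under the uncore lock with
 * forcewake held so no concurrent access observes the temporary steering.
 */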

/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(i915)) {
	default:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		if (INTEL_GEN(i915) >= 12) {
			instdone->slice_common_extra[0] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
			instdone->slice_common_extra[1] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
		}
		for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
		break;
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	if (!intel_engine_pm_get_if_awake(engine))
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(engine->i915) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_engine_pm_put(engine);

	return idle;
}

void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
	struct tasklet_struct *t = &engine->execlists.tasklet;

	if (__tasklet_is_scheduled(t)) {
		local_bh_disable();
		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}
		local_bh_enable();
	}

	/* Otherwise flush the tasklet if it was running on another cpu */
	tasklet_unlock_wait(t);
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	if (!intel_engine_pm_is_awake(engine))
		return true;

	/* Waiting to drain ELSP? */
	if (execlists_active(&engine->execlists)) {
		synchronize_hardirq(engine->i915->drm.pdev->irq);

		intel_engine_flush_submission(engine);

		if (execlists_active(&engine->execlists))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	/* Ring stopped? */
	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (intel_gt_is_wedged(gt))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(gt->awake))
		return true;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		engine->set_default_submission(engine);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
		   prefix,
		   rq->fence.context, rq->fence.seqno,
		   i915_request_completed(rq) ? "!" :
		   i915_request_started(rq) ? "*" :
		   "",
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &rq->fence.flags) ? "+" :
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &rq->fence.flags) ? "-" :
		   "",
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->active.lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static const char *repr_timer(const struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return "idle";

	if (timer_pending(t))
		return "active";

	return "done";
}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u64 addr;

	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IMR));
		drm_printf(m, "\tRING_ESR: 0x%08x\n",
			   ENGINE_READ(engine, RING_ESR));
		drm_printf(m, "\tRING_EMR: 0x%08x\n",
			   ENGINE_READ(engine, RING_EMR));
		drm_printf(m, "\tRING_EIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_EIR));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = ENGINE_READ(engine, RING_DMA_FADD);
	else
		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEHR));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		struct i915_request * const *port, *rq;
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
			   repr_timer(&engine->execlists.preempt),
			   repr_timer(&engine->execlists.timer));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   read, write, num_entries);

		if (read >= num_entries)
			read = 0;
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += num_entries;
		while (read < write) {
			idx = ++read % num_entries;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
				   idx, hws[idx * 2], hws[idx * 2 + 1]);
		}

		execlists_active_lock_bh(execlists);
		rcu_read_lock();
		for (port = execlists->active; (rq = *port); port++) {
			char hdr[80];
			int len;

			len = scnprintf(hdr, sizeof(hdr),
					"\t\tActive[%d]: ",
					(int)(port - execlists->active));
			if (!i915_request_signaled(rq)) {
				struct intel_timeline *tl = get_timeline(rq);

				len += scnprintf(hdr + len, sizeof(hdr) - len,
						 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
						 i915_ggtt_offset(rq->ring->vma),
						 tl ? tl->hwsp_offset : 0,
						 hwsp_seqno(rq),
						 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
								       1000 * 1000));

				if (tl)
					intel_timeline_put(tl);
			}
			scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
		}
		for (port = execlists->pending; (rq = *port); port++) {
			struct intel_timeline *tl = get_timeline(rq);
			char hdr[80];

			snprintf(hdr, sizeof(hdr),
				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
				 (int)(port - execlists->pending),
				 i915_ggtt_offset(rq->ring->vma),
				 tl ? tl->hwsp_offset : 0,
				 hwsp_seqno(rq));
			print_request(m, rq, hdr);

			if (tl)
				intel_timeline_put(tl);
		}
		rcu_read_unlock();
		execlists_active_unlock_bh(execlists);
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
	}
}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);

		kfree(ring);
	}
}

static unsigned long list_count(struct list_head *list)
{
	struct list_head *pos;
	unsigned long count = 0;

	list_for_each(pos, list)
		count++;

	return count;
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (intel_gt_is_wedged(engine->gt))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
	drm_printf(m, "\tBarriers?: %s\n",
		   yesno(!llist_empty(&engine->barrier_tasks)));
	drm_printf(m, "\tLatency: %luus\n",
		   ewma__engine_latency_read(&engine->latency));

	rcu_read_lock();
	rq = READ_ONCE(engine->heartbeat.systole);
	if (rq)
		drm_printf(m, "\tHeartbeat: %d ms ago\n",
			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	rcu_read_unlock();
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	drm_printf(m, "\tRequests:\n");

	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq) {
		struct intel_timeline *tl = get_timeline(rq);

		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start:  0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head:   0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail:   0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit:   0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space:  0x%08x\n",
			   rq->ring->space);

		if (tl) {
			drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
				   tl->hwsp_offset);
			intel_timeline_put(tl);
		}

		print_request_ring(m, rq);

		if (rq->context->lrc_reg_state) {
			drm_printf(m, "Logical Ring Context:\n");
			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
		}
	}
	drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
	spin_unlock_irqrestore(&engine->active.lock, flags);

	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, print_request, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	execlists_active_lock_bh(execlists);
	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		struct i915_request * const *port;
		struct i915_request *rq;

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		for (port = execlists->active; (rq = *port); port++)
			engine->stats.active++;

		for (port = execlists->pending; (rq = *port); port++) {
			/* Exclude any contexts already counted in active */
			if (!intel_context_inflight_count(rq->context))
				engine->stats.active++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
	execlists_active_unlock_bh(execlists);

	return err;
}
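
/*
 * engine->stats.enabled is a reference count: several users (e.g. the i915
 * PMU) may enable busy tracking independently, and accumulation only starts
 * when the count first rises from zero.
 */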

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}
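
/*
 * A minimal sketch of sampling busyness with the helper above, assuming
 * stats were already enabled (hypothetical caller, sample_ms/busy_ns are
 * illustrative names):
 *
 *	ktime_t t0 = intel_engine_get_busy_time(engine);
 *	msleep(sample_ms);
 *	busy_ns = ktime_to_ns(ktime_sub(intel_engine_get_busy_time(engine), t0));
 *
 * The seqlock retry loop guarantees each sample is self-consistent even if
 * an enable/disable races with the read.
 */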

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static bool match_ring(struct i915_request *rq)
{
	u32 ring = ENGINE_READ(rq->engine, RING_START);

	return ring == i915_ggtt_offset(rq->ring->vma);
}

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	lockdep_assert_held(&engine->active.lock);

	rcu_read_lock();
	request = execlists_active(&engine->execlists);
	if (request) {
		struct intel_timeline *tl = request->context->timeline;

		list_for_each_entry_from_reverse(request, &tl->requests, link) {
			if (i915_request_completed(request))
				break;

			active = request;
		}
	}
	rcu_read_unlock();
	if (active)
		return active;

	list_for_each_entry(request, &engine->active.requests, sched.link) {
		if (i915_request_completed(request))
			continue;

		if (!i915_request_started(request))
			continue;

		/* More than one preemptible request may match! */
		if (!match_ring(request))
			continue;

		active = request;
		break;
	}

	return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif