/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__
#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
/* Legacy HW Engine ID */

/* Gen11+ HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3

#define MAX_ENGINE_CLASS	4
#define MAX_ENGINE_INSTANCE	3
#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_sched_attr;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};
struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 slice_common_extra[2];
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
/*
 * we use a single page to load ctx workarounds so all of these
 * values are referred to in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;

	struct i915_vma *vma;
};
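/*
 * A minimal sketch (not part of this header) of how the dword-based
 * offset above might be turned into a GGTT address when programming the
 * indirect-context registers; wa_ctx and the i915_ggtt_offset() helper
 * are assumed from the surrounding driver:
 *
 *	u32 ggtt_base = i915_ggtt_offset(wa_ctx->vma);
 *	u32 batch_addr = ggtt_base +
 *			 wa_ctx->indirect_ctx.offset * sizeof(u32);
 */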
#define I915_MAX_VCS	4
#define I915_MAX_VECS	2
/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};
/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
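/*
 * Usage sketch, not part of this header: DECLARE_EWMA() from
 * <linux/average.h> expands to struct ewma__engine_latency plus
 * ewma__engine_latency_init/add/read helpers. With a weight reciprocal
 * of 4, each new sample contributes 1/4 of the running average:
 *
 *	struct ewma__engine_latency latency;
 *
 *	ewma__engine_latency_init(&latency);
 *	ewma__engine_latency_add(&latency, sample_ns); // fold in a sample
 *	estimate = ewma__engine_latency_read(&latency); // smoothed value
 */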
struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};
/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;
	/**
	 * @ccid: identifier for contexts submitted to this engine
	 */
	u32 ccid;

	/**
	 * @yield: CCID at the time of the last semaphore-wait interrupt.
	 *
	 * Instead of leaving a semaphore busy-spinning on an engine, we would
	 * like to switch to another ready context, i.e. yielding the semaphore
	 * timeslice.
	 */
	u32 yield;

	/**
	 * @error_interrupt: CS Master EIR
	 *
	 * The CS generates an interrupt when it detects an error. We capture
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 */
	u32 error_interrupt;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;
	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
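	/*
	 * A simplified sketch of that promotion, assuming a CS-event loop
	 * along the lines of the driver's process_csb(), with
	 * execlists_num_ports() taken from intel_engine.h: once the HW
	 * acknowledges the submission, @pending becomes the new @inflight
	 * set and is then cleared for the next ELSP write:
	 *
	 *	memcpy(execlists->inflight, execlists->pending,
	 *	       execlists_num_ports(execlists) *
	 *	       sizeof(*execlists->pending));
	 *	WRITE_ONCE(execlists->pending[0], NULL);
	 */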
	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @switch_priority_hint: Second context priority.
	 *
	 * We submit multiple contexts to the HW simultaneously and would
	 * like to occasionally switch between them to emulate timeslicing.
	 * To know when timeslicing is suitable, we track the priority of
	 * the context submitted second.
	 */
	int switch_priority_hint;
	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request we wanted to preempt but since completed, at the time of
	 * dequeuing the priority hint may no longer match the highest
	 * available request priority.
	 */
	int queue_priority_hint;
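	/*
	 * An illustrative, simplified use of the hint, assuming a helper
	 * named rq_prio() that returns the effective priority of the
	 * currently executing request (the driver's real check lives in
	 * need_preempt() in intel_lrc.c):
	 *
	 *	if (engine->execlists.queue_priority_hint <= rq_prio(rq))
	 *		return false; // nothing queued outranks the inflight request
	 */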
	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	struct rb_root_cached virtual;
	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	const u32 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};
#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;
	intel_engine_mask_t mask;

	u32 uabi_capabilities;

	unsigned long context_tag;

	struct rb_node uabi_node;

	struct intel_sseu sseu;
	struct {
		spinlock_t lock;
		struct list_head requests;
		struct list_head hold; /* ready requests, but on hold */
	} active;
	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	intel_engine_mask_t saturated; /* submitting semaphores too late? */
	struct {
		struct delayed_work work;
		struct i915_request *systole;
	} heartbeat;
	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	struct intel_ring *ring;
	struct intel_timeline *timeline;
	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is
	 * under ideal conditions.
	 */
	struct ewma__engine_latency latency;
	/*
	 * Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;
	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;
	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct intel_engine_pool pool;
	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*resume)(struct intel_engine_cs *engine);
	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;
	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);
	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
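	/*
	 * Example call, a sketch rather than driver code: request a
	 * combined invalidation and flush barrier around a batch:
	 *
	 *	err = engine->emit_flush(rq, EMIT_BARRIER);
	 *	if (err)
	 *		return err;
	 */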
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;
	/*
	 * Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);
	/*
	 * Called on signaling of a SUBMIT_FENCE, passing along the signaling
	 * request down to the bonded pairs.
	 */
	void (*bond_execute)(struct i915_request *rq,
			     struct dma_fence *signal);
	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);
	void (*release)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;
	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;
	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;
#define I915_ENGINE_USING_CMD_PARSER	BIT(0)
#define I915_ENGINE_SUPPORTS_STATS	BIT(1)
#define I915_ENGINE_HAS_PREEMPTION	BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES	BIT(3)
#define I915_ENGINE_HAS_TIMESLICES	BIT(4)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET	BIT(5)
#define I915_ENGINE_IS_VIRTUAL		BIT(6)
#define I915_ENGINE_HAS_RELATIVE_MMIO	BIT(7)
#define I915_ENGINE_REQUIRES_CMD_PARSER	BIT(8)
	unsigned int flags;
	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
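	/*
	 * Lookup sketch, assuming a hypothetical table-entry type with an
	 * hlist_node member named "node" and a u32 "cmd_value" (the real
	 * descriptor type lives in the command parser, not in this
	 * header); DECLARE_HASHTABLE() and hash_for_each_possible() come
	 * from <linux/hashtable.h>:
	 *
	 *	struct cmd_node *desc_node;
	 *
	 *	hash_for_each_possible(engine->cmd_hash, desc_node, node,
	 *			       cmd_header)
	 *		if (desc_node->cmd_value == cmd_header)
	 *			return desc_node;
	 */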
	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;
	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
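	/*
	 * A hypothetical implementation for illustration only (the opcode
	 * split below is invented, not any real engine's encoding):
	 * headers in one client range keep their length in the low 6 bits,
	 * everything else is rejected:
	 *
	 *	static u32 example_get_cmd_length_mask(u32 cmd_header)
	 *	{
	 *		if ((cmd_header >> 29) == 0x0) // hypothetical range
	 *			return 0x3f; // length lives in bits 5:0
	 *		return 0; // unrecognized: reject the command
	 *	}
	 */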
	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;

		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;

		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;

		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long max_busywait_duration_ns;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props;
};
static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}
static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
		return false;

	return engine->flags & I915_ENGINE_HAS_TIMESLICES;
}
static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}
#define instdone_has_slice(dev_priv___, sseu___, slice___) \
	((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))

#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
	(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
	 intel_sseu_has_subslice(sseu__, 0, subslice__))

#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
	for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
	     (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
	     (slice_) += ((subslice_) == 0)) \
		for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
			    (instdone_has_subslice(dev_priv_, sseu_, slice_, \
						   subslice_)))
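/*
 * Usage sketch, assuming an instdone struct and an sseu description
 * from the surrounding driver; the iterator visits only slice/subslice
 * pairs that are actually present in the hardware:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x\n",
 *			slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */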
#endif /* __INTEL_ENGINE_TYPES__ */