/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__

#include <linux/average.h>
#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "i915_gem.h"
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "intel_engine_pool_types.h"
#include "intel_sseu.h"
#include "intel_timeline_types.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"

/* Legacy HW Engine ID */

/* Gen11+ HW Engine class + instance */
#define RENDER_CLASS		0
#define VIDEO_DECODE_CLASS	1
#define VIDEO_ENHANCEMENT_CLASS	2
#define COPY_ENGINE_CLASS	3
#define OTHER_CLASS		4
#define MAX_ENGINE_CLASS	4
#define MAX_ENGINE_INSTANCE	3

#define I915_MAX_SLICES		3
#define I915_MAX_SUBSLICES	8

#define I915_CMD_HASH_ORDER	9

struct dma_fence;
struct drm_i915_gem_object;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_ring;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
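
/*
 * A hedged usage sketch: each engine occupies one bit of the mask
 * (engine->mask is typically BIT(engine->id)), so engine sets compose
 * with ordinary bit operations:
 *
 *	intel_engine_mask_t mask = BIT(RCS0) | BIT(VCS0);
 *
 *	if (mask != ALL_ENGINES)
 *		...;
 */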

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 slice_common_extra[2];
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
#define INVALID_ENGINE ((enum intel_engine_id)-1)
};

/* A simple estimator for the round-trip latency of an engine */
DECLARE_EWMA(_engine_latency, 6, 4)
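
/*
 * DECLARE_EWMA() above generates struct ewma__engine_latency together with
 * the ewma__engine_latency_{init,add,read}() helpers from <linux/average.h>.
 * A hedged usage sketch (the sample value here is illustrative):
 *
 *	ewma__engine_latency_init(&engine->latency);
 *	ewma__engine_latency_add(&engine->latency, sample);
 *	avg = ewma__engine_latency_read(&engine->latency);
 */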

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @timer: kick the current context if its timeslice expires
	 */
	struct timer_list timer;

	/**
	 * @preempt: reset the current context if it fails to give way
	 */
	struct timer_list preempt;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @error_interrupt: CS Master EIR
	 *
	 * The CS generates an interrupt when it detects an error. We capture
	 * the first error interrupt, record the EIR and schedule the tasklet.
	 * In the tasklet, we process the pending CS events to ensure we have
	 * the guilty request, and then reset the engine.
	 */
	u32 error_interrupt;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

#define EXECLIST_MAX_PORTS 2
	/**
	 * @active: the currently known context executing on HW
	 */
	struct i915_request * const *active;
	/**
	 * @inflight: the set of contexts submitted and acknowledged by HW
	 *
	 * The set of inflight contexts is managed by reading CS events
	 * from the HW. On a context-switch event (not preemption), we
	 * know the HW has transitioned from port0 to port1, and we
	 * advance our inflight/active tracking accordingly.
	 */
	struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
	/**
	 * @pending: the next set of contexts submitted to ELSP
	 *
	 * We store the array of contexts that we submit to HW (via ELSP) and
	 * promote them to the inflight array once HW has signaled the
	 * preemption or idle-to-active event.
	 */
	struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
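	/*
	 * Both arrays are NULL-terminated, so a hedged sketch of walking the
	 * known-active contexts is simply:
	 *
	 *	struct i915_request * const *port;
	 *
	 *	for (port = execlists->active; *port; port++)
	 *		note_request(*port);	// note_request() is hypothetical
	 */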

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @switch_priority_hint: Second context priority.
	 *
	 * We submit multiple contexts to the HW simultaneously and would
	 * like to occasionally switch between them to emulate timeslicing.
	 * To know when timeslicing is suitable, we track the priority of
	 * the context submitted second.
	 */
	int switch_priority_hint;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request that has since completed, at the time of dequeuing the
	 * priority hint may no longer match the highest available request
	 * priority.
	 */
	int queue_priority_hint;
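	/*
	 * A hedged sketch of how the hint gates preemption on submission;
	 * rq_prio() is illustrative, not defined in this header:
	 *
	 *	if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
	 *		engine->execlists.queue_priority_hint = rq_prio(rq);
	 *		tasklet_hi_schedule(&engine->execlists.tasklet);
	 *	}
	 */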

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;
	struct rb_root_cached virtual;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	const u32 *csb_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	enum intel_engine_id legacy_idx;

	intel_engine_mask_t mask;

	u32 uabi_capabilities;

	unsigned int context_tag;
#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS)
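	/* i.e. with EXECLIST_MAX_PORTS == 2, roundup_pow_of_two(4) == 4 tags */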

	struct rb_node uabi_node;

	struct intel_sseu sseu;

	struct {
		spinlock_t lock;
		struct list_head requests;
		struct list_head hold; /* ready requests, but on hold */
	} active;

	struct llist_head barrier_tasks;

	struct intel_context *kernel_context; /* pinned */

	intel_engine_mask_t saturated; /* submitting semaphores too late? */

	struct {
		struct delayed_work work;
		struct i915_request *systole;
	} heartbeat;

	unsigned long serial;

	unsigned long wakeref_serial;
	struct intel_wakeref wakeref;
	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	struct {
		struct intel_ring *ring;
		struct intel_timeline *timeline;
	} legacy;

	/*
	 * We track the average duration of the idle pulse on parking the
	 * engine to keep an estimate of how fast the engine is
	 * under ideal conditions.
	 */
	struct ewma__engine_latency latency;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct intel_engine_pool pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*resume)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);

		void (*rewind)(struct intel_engine_cs *engine, bool stalled);
		void (*cancel)(struct intel_engine_cs *engine);

		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
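	/*
	 * A hedged usage sketch for @emit_flush: a full pipeline barrier
	 * combines both bits, e.g. before emitting a breadcrumb:
	 *
	 *	err = engine->emit_flush(rq, EMIT_BARRIER);
	 *	if (err)
	 *		return err;
	 */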
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/*
	 * Called on signaling of a SUBMIT_FENCE, passing along the signaling
	 * request down to the bonded pairs.
	 */
	void (*bond_execute)(struct i915_request *rq,
			     struct dma_fence *signal);

	/*
	 * Called when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	void (*release)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/*
	 * Keep track of completed timelines on this engine for early
	 * retirement with the goal of quickly enabling powersaving as
	 * soon as the engine is idle.
	 */
	struct intel_timeline *retire;
	struct work_struct retire_work;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	unsigned int flags;
#define I915_ENGINE_USING_CMD_PARSER	BIT(0)
#define I915_ENGINE_SUPPORTS_STATS	BIT(1)
#define I915_ENGINE_HAS_PREEMPTION	BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES	BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
#define I915_ENGINE_IS_VIRTUAL		BIT(5)
#define I915_ENGINE_HAS_RELATIVE_MMIO	BIT(6)
#define I915_ENGINE_REQUIRES_CMD_PARSER	BIT(7)

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
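	/*
	 * A hedged sketch of the fallback decode in a caller; the +2 length
	 * bias shown is illustrative, not defined in this header:
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *	if (!mask)
	 *		return -EINVAL;	// unrecognized command
	 *	length = (cmd_header & mask) + 2;
	 */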

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
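	/*
	 * A hedged reader sketch: busy time is @total plus the still-open
	 * block when the engine is active:
	 *
	 *	ktime_t busy = stats->total;
	 *	if (stats->active)
	 *		busy = ktime_add(busy,
	 *				 ktime_sub(ktime_get(), stats->start));
	 */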

	struct {
		unsigned long heartbeat_interval_ms;
		unsigned long max_busywait_duration_ns;
		unsigned long preempt_timeout_ms;
		unsigned long stop_timeout_ms;
		unsigned long timeslice_duration_ms;
	} props;
};

static inline bool
intel_engine_using_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_USING_CMD_PARSER;
}

static inline bool
intel_engine_requires_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_REQUIRES_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

static inline bool
intel_engine_is_virtual(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_IS_VIRTUAL;
}

static inline bool
intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
{
	return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO;
}
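
/*
 * A hedged usage sketch: callers test engine capabilities through the
 * predicates above rather than peeking at engine->flags directly, e.g.:
 *
 *	if (intel_engine_has_preemption(engine))
 *		kick_preemption(engine);	// kick_preemption() is hypothetical
 */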

#define instdone_has_slice(dev_priv___, sseu___, slice___) \
	((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___))

#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \
	(IS_GEN(dev_priv__, 7) ? (1 & BIT(subslice__)) : \
	 intel_sseu_has_subslice(sseu__, 0, subslice__))

#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \
	for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \
	     (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \
	     (slice_) += ((subslice_) == 0)) \
		for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
			    (instdone_has_subslice(dev_priv_, sseu_, slice_, \
						   subslice_)))
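
/*
 * A hedged usage sketch for the iterator above; instdone here names a
 * struct intel_instdone snapshot read back from the HW:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
 *		pr_info("sampler %08x row %08x\n",
 *			instdone->sampler[slice][subslice],
 *			instdone->row[slice][subslice]);
 */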

#endif /* __INTEL_ENGINE_TYPES__ */