/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/types.h>

#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
#include "intel_workarounds_types.h"

#include "i915_gem_batch_pool.h"

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9

struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

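/*
 * Illustrative sketch (not part of the original header): each engine
 * occupies a single bit of an intel_engine_mask_t, so a subset of
 * engines can be selected by OR-ing BIT() of the engine IDs defined
 * below, e.g.:
 *
 *	intel_engine_mask_t mask = BIT(RCS0) | BIT(VCS0);
 *
 *	if (mask == ALL_ENGINES)
 *		...every engine is selected...
 */
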
struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	unsigned long action_timestamp;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;
};

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;

	struct i915_vma *vma;
};

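/*
 * Illustrative sketch (not part of the original header): since the
 * fields above are expressed in dwords, converting to a byte offset
 * into the workaround page is a multiply by sizeof(u32), e.g.:
 *
 *	u32 byte_offset = wa_ctx->indirect_ctx.offset * sizeof(u32);
 *	u32 byte_size = wa_ctx->indirect_ctx.size * sizeof(u32);
 */
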
#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
};

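/*
 * Illustrative sketch (not part of the original header): the _VCS()
 * and _VECS() helpers map an instance number onto the contiguous enum
 * values above, e.g. _VCS(2) == VCS2 and _VECS(1) == VECS1, which is
 * why instances of the same engine type must be kept together.
 */
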
struct st_preempt_hang {
	struct completion completion;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state
 * of the driver and the hardware for the execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11
	 * and to the ExecList Submission Queue Contents register array
	 * for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)
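		/*
		 * Illustrative sketch (not part of the original header):
		 * the macros above pack a small submission count into the
		 * low EXECLIST_COUNT_BITS of the request pointer (via the
		 * ptr_*_bits() helpers), so a lite-restore resubmission of
		 * port[0] might look like:
		 *
		 *	unsigned int count;
		 *	struct i915_request *rq;
		 *
		 *	rq = port_unpack(&execlists->port[0], &count);
		 *	if (rq)
		 *		port_set(&execlists->port[0],
		 *			 port_pack(rq, count + 1));
		 */
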
		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2
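	/*
	 * Illustrative sketch (not part of the original header): the
	 * EXECLISTS_ACTIVE_* values are bit numbers into @active, so each
	 * submission source can be tracked independently, e.g.:
	 *
	 *	__set_bit(EXECLISTS_ACTIVE_USER,
	 *		  (unsigned long *)&execlists->active);
	 *
	 *	if (test_bit(EXECLISTS_ACTIVE_PREEMPT,
	 *		     (unsigned long *)&execlists->active))
	 *		...a preemption request is still in flight...
	 */
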
	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an inflight
	 * request that we wanted to preempt but has since completed, by the
	 * time of dequeuing the priority hint may no longer match the
	 * highest available request priority.
	 */
	int queue_priority_hint;
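	/*
	 * Illustrative sketch (not part of the original header): a
	 * submission path might compare the hint against the priority of
	 * the currently executing request to decide whether to preempt
	 * (effective_prio() and active_rq are hypothetical names):
	 *
	 *	if (execlists->queue_priority_hint > effective_prio(active_rq))
	 *		...inject a preemption request...
	 */
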
	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;

	intel_engine_mask_t mask;

	struct intel_ring *buffer;

	struct i915_timeline timeline;

	struct intel_context *kernel_context; /* pinned */
	struct intel_context *preempt_context; /* pinned; optional */

	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;
	} breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);
	int (*init_context)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
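	/*
	 * Illustrative sketch (not part of the original header): a caller
	 * requesting both an invalidate and a flush combines the mode
	 * bits, e.g.:
	 *
	 *	err = engine->emit_flush(rq, EMIT_BARRIER);
	 *	if (err)
	 *		return err;
	 */
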
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
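	/*
	 * Illustrative sketch (not part of the original header): starting
	 * a batch with a privileged (secure) dispatch; batch_offset and
	 * batch_len are hypothetical locals:
	 *
	 *	err = engine->emit_bb_start(rq, batch_offset, batch_len,
	 *				    I915_DISPATCH_SECURE);
	 */
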
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/*
	 * Called when the priority of a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or that are queued for
	 * execution. This should only cancel the ready requests that have
	 * been submitted to the engine (via the engine->submit_request
	 * callback). This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	void (*cleanup)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request, ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct intel_context *last_retired_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES   BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command,
	 * or 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
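	/*
	 * Illustrative sketch (not part of the original header): for a
	 * command without a cmd_hash entry, the parser can derive the
	 * command length from the header alone (the bias applied per
	 * opcode range is hypothetical here):
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *
	 *	if (!mask)
	 *		return -EINVAL;
	 *	length = (cmd_header & mask) + bias;
	 */
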
	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, busy as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in
		 * cases where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

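/*
 * Illustrative sketch (not part of the original header): under stats.lock,
 * the current busy time follows from the fields above; while the engine is
 * still active, the open interval since @start is added on top of @total:
 *
 *	ktime_t busy = stats->total;
 *
 *	if (stats->active)
 *		busy = ktime_add(busy, ktime_sub(ktime_get(), stats->start));
 */
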
static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
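/*
 * Illustrative sketch (not part of the original header): walking every
 * slice/subslice pair present on the device to dump the per-unit values
 * captured in struct intel_instdone:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */
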
#endif /* __INTEL_ENGINE_TYPES__ */