/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_ENGINE_TYPES__
#define __INTEL_ENGINE_TYPES__

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/types.h>

#include "i915_gem.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
#include "i915_timeline_types.h"
#include "intel_workarounds_types.h"

#include "i915_gem_batch_pool.h"
#include "i915_pmu.h"

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 8

#define I915_CMD_HASH_ORDER 9

struct dma_fence;
struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
struct intel_uncore;

typedef u8 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *addr;
};

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 last_seqno;
	u32 next_seqno;
	unsigned long action_timestamp;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct kref ref;
	struct i915_vma *vma;
	void *vaddr;

	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};
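
/*
 * Illustrative sketch only, not the driver's actual helper: with a
 * power-of-two @size, the free @space between the consumer offset (@head)
 * and the producer offset (@emit) can be computed with a mask, keeping a
 * small guard gap so a full ring is distinguishable from an empty one.
 * RING_GUARD_GAP is a placeholder constant assumed here for illustration.
 *
 *	#define RING_GUARD_GAP 64	// hypothetical guard size, in bytes
 *
 *	static inline u32 example_ring_space(const struct intel_ring *ring)
 *	{
 *		return (ring->head - ring->emit - RING_GUARD_GAP) &
 *		       (ring->size - 1);
 *	}
 */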

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 * offset: specifies the batch starting position; also helpful in case
 * we want to have multiple batches at different offsets based on
 * some criteria. It is not a requirement at the moment but provides
 * an option for future use.
 * size: size of the batch in dwords
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

#define I915_MAX_VCS 4
#define I915_MAX_VECS 2

/*
 * Engine IDs definitions.
 * Keep instances of the same engine type together.
 */
enum intel_engine_id {
	RCS0 = 0,
	BCS0,
	VCS0,
	VCS1,
	VCS2,
	VCS3,
#define _VCS(n) (VCS0 + (n))
	VECS0,
	VECS1,
#define _VECS(n) (VECS0 + (n))
	I915_NUM_ENGINES
};

struct st_preempt_hang {
	struct completion completion;
	unsigned int count;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware for the execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of a context to port[0].
	 * This is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
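
	/*
	 * Illustrative sketch only, not the driver's actual code: the helpers
	 * above pack a request pointer together with a small submission count
	 * in the low EXECLIST_COUNT_BITS of @request_count. Roughly:
	 *
	 *	struct execlist_port *port = execlists->port;
	 *	struct i915_request *rq;
	 *	unsigned int count;
	 *
	 *	// submit: store the request and bump its submission count
	 *	port_set(port, port_pack(rq, port_count(port) + 1));
	 *
	 *	// completion event: recover the request and remaining count
	 *	rq = port_unpack(port, &count);
	 *	if (count > 1)
	 *		port_set(port, port_pack(rq, count - 1));
	 *
	 * Variable names are placeholders for illustration.
	 */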

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2
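
	/*
	 * Illustrative sketch only, not the driver's actual code: @active is
	 * treated as a small bitfield, so a caller would typically use the
	 * set_bit()/clear_bit()/test_bit() family on it, e.g.
	 *
	 *	if (test_bit(EXECLISTS_ACTIVE_USER,
	 *		     (unsigned long *)&execlists->active))
	 *		return true;	// a user context is still in flight
	 */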

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority_hint: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 * However, since we may have recorded the priority of an in-flight
	 * request that we wanted to preempt but which has since completed,
	 * at the time of dequeuing the priority hint may no longer match
	 * the highest available request priority.
	 */
	int queue_priority_hint;
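
	/*
	 * Illustrative sketch only, not the driver's actual code: a dequeue
	 * path could compare the hint against the priority of the currently
	 * executing request to decide whether a preemption is worthwhile.
	 * rq_prio() and inject_preempt_context() are stand-in names here.
	 *
	 *	if (execlists->queue_priority_hint > rq_prio(last))
	 *		inject_preempt_context(engine);
	 */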

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_size: context status buffer FIFO size
	 */
	u8 csb_size;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;
	intel_engine_mask_t mask;

	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;

	struct i915_timeline timeline;

	struct intel_context *kernel_context; /* pinned */
	struct intel_context *preempt_context; /* pinned; optional */

	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock;
		struct list_head signalers;

		struct irq_work irq_work; /* for use from inside irq_lock */

		unsigned int irq_enabled;

		bool irq_armed;
	} breadcrumbs;

	struct intel_engine_pmu {
		/**
		 * @enable: Bitmask of enabled sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 *
		 * Index number corresponds to @enum drm_i915_pmu_engine_sample.
		 */
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
	} pmu;
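
	/*
	 * Illustrative sketch only, not the driver's actual code: a sampling
	 * timer can consult @enable to decide which counters in @sample to
	 * advance. period_ns is a placeholder for the elapsed sampling
	 * period, and the accumulation shown is a simplification.
	 *
	 *	if (engine->pmu.enable & BIT(I915_SAMPLE_BUSY))
	 *		engine->pmu.sample[I915_SAMPLE_BUSY].cur += period_ns;
	 */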

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_wa_list ctx_wa_list;
	struct i915_wa_list wa_list;
	struct i915_wa_list whitelist;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);

	struct {
		void (*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine, bool stalled);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	const struct intel_context_ops *cops;

	int (*request_alloc)(struct i915_request *rq);
	int (*init_context)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
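
	/*
	 * Illustrative sketch only, not the driver's actual code: a request
	 * construction path would typically invalidate caches before the
	 * batch and flush after it, passing dispatch flags that match the
	 * batch placement. batch_offset and batch_len are placeholders.
	 *
	 *	err = engine->emit_flush(rq, EMIT_INVALIDATE);
	 *	if (!err)
	 *		err = engine->emit_bb_start(rq, batch_offset, batch_len,
	 *					    0 /+ dispatch_flags +/);
	 *	if (!err)
	 *		err = engine->emit_flush(rq, EMIT_FLUSH);
	 */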
	int (*emit_init_breadcrumb)(struct i915_request *rq);
	u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
				     u32 *cs);
	unsigned int emit_fini_breadcrumb_dw;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/*
	 * Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	void (*cleanup)(struct intel_engine_cs *engine);

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct intel_context *last_retired_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS BIT(1)
#define I915_ENGINE_HAS_PREEMPTION BIT(2)
#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
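
	/*
	 * Illustrative sketch only, not the driver's actual code: for a
	 * command header that is not in @cmd_hash, the parser can still step
	 * over it by decoding the length field with this mask. The "+ 2"
	 * bias below reflects the common "length minus two" encoding of
	 * command headers and is an assumption here.
	 *
	 *	u32 mask = engine->get_cmd_length_mask(cmd_header);
	 *	if (!mask)
	 *		return -EINVAL;	// length cannot be determined
	 *	length = (cmd_header & mask) + 2;
	 */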

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active as active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where the engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};
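
/*
 * Illustrative sketch only, not the driver's actual code: the busy time
 * accumulated in intel_engine_cs.stats is @total plus, when @active is
 * non-zero, the still-open busy period started at @start. Acquiring @lock
 * around the read is omitted for brevity.
 *
 *	ktime_t total = engine->stats.total;
 *
 *	if (engine->stats.active)
 *		total = ktime_add(total,
 *				  ktime_sub(ktime_get(), engine->stats.start));
 *	return total;
 */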

static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool
intel_engine_has_semaphores(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
}

static inline bool
intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}

#define instdone_slice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(IS_GEN(dev_priv__, 7) ? \
	 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	     (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
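
/*
 * Illustrative sketch only, not the driver's actual code: the iterator
 * above visits only the slice/subslice pairs present on the device, so
 * dumping the per-unit values gathered in struct intel_instdone might look
 * like:
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		pr_info("sampler[%d][%d]: 0x%08x\n",
 *			slice, subslice,
 *			instdone->sampler[slice][subslice]);
 */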

#endif /* __INTEL_ENGINE_TYPES__ */