#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
#include "i915_selftest.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

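/*
 * Illustrative arithmetic (a sketch, assuming the five-engine
 * I915_NUM_ENGINES of this header): GEN8_SEMAPHORE_OFFSET(0, 1)
 * = (0 * 5 + 1) * sizeof(uint64_t) = 0x08, so each "from" engine owns
 * a 0x28-byte row of semaphore slots and each "to" engine an 8-byte
 * column within that row.
 */
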
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

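/*
 * A minimal usage sketch (illustrative, not part of the original
 * header): walking every populated slice/subslice pair, e.g. to dump
 * the per-unit sampler values captured in struct intel_instdone.
 *
 *	struct intel_instdone instdone;
 *	int slice, subslice;
 *
 *	intel_engine_get_instdone(engine, &instdone);
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		DRM_DEBUG("sampler[%d][%d] = 0x%08x\n", slice, subslice,
 *			  instdone.sampler[slice][subslice]);
 */
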
struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct drm_i915_gem_request *active_request;
	bool stalled;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct list_head request_list;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are expressed in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;
struct intel_render_state;

/*
 * Engine ID definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
#define _VCS(n) (VCS + (n))
	VECS
};

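/*
 * An illustrative note (not from the original source): _VCS(n) relies
 * on the VCS instances sitting next to each other in the enum above,
 * so _VCS(0) == VCS and _VCS(1) == VCS2.
 */
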
struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];
	enum intel_engine_id id;
	unsigned int uabi_id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;
	unsigned int irq_shift;

	struct intel_ring *buffer;
	struct intel_timeline *timeline;

	struct intel_render_state *render_state;

	atomic_t irq_count;
	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0
#define ENGINE_IRQ_EXECLIST 1

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request __rcu *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;

		bool irq_armed : 1;
		bool irq_enabled : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
					  struct i915_gem_context *ctx);
	void (*context_unpin)(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx);
	int (*request_alloc)(struct drm_i915_gem_request *req);
	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE BIT(0)
#define EMIT_FLUSH BIT(1)
#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
				u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct drm_i915_gem_request *request,
			 int priority);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90)  | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	  sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90)  | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0)  |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	struct i915_priolist default_priolist;
	bool no_priolist;
	struct execlist_port {
		struct drm_i915_gem_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, e) ((p) - (e)->execlist_port)
		GEM_DEBUG_DECL(u32 context_id);
	} execlist_port[2];
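	/*
	 * A minimal sketch (illustrative, not from the original source) of
	 * how the packing macros above compose: the low EXECLIST_COUNT_BITS
	 * of request_count carry a submission count alongside the pointer.
	 *
	 *	unsigned int count;
	 *	struct drm_i915_gem_request *rq = port_unpack(port, &count);
	 *
	 *	port_set(port, port_pack(rq, count + 1));
	 */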
	struct rb_root execlist_queue;
	struct rb_node *execlist_first;
	unsigned int fw_domains;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct i915_gem_context *last_retired_context;

	/* We track the current MI_SET_CONTEXT in order to eliminate
	 * redundant context switches. This presumes that requests are not
	 * reordered! Or, when they are, the tracking is updated along with
	 * the emission of individual requests into the legacy command
	 * stream (ring).
	 */
	struct i915_gem_context *legacy_active_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so only when we are uncertain of the device state, we take
	 * a bit of extra paranoia to try and ensure that the HWS takes the
	 * value we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

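/*
 * A minimal usage sketch (illustrative, not from the original source):
 * forcing the breadcrumb seqno in the HWS, as a global seqno reset
 * might, takes the paranoid clflush path above.
 *
 *	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
 */
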
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

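/*
 * Illustrative arithmetic (a sketch, assuming the usual
 * MI_STORE_DWORD_INDEX_SHIFT of 2): I915_GEM_HWS_INDEX_ADDR
 * = 0x30 << 2 = 0xc0, i.e. dword index 0x30 lives at byte offset 0xc0
 * of the status page, which is what intel_hws_seqno_address() below
 * adds to the page's ggtt_offset.
 */
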
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
				   unsigned int n);

static inline void
intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((req->ring->vaddr + req->ring->emit) != cs);
}

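/*
 * A minimal sketch of the emission pattern checked by the GEM_BUG_ON
 * above (illustrative, not from the original source): begin reserves
 * space, the caller writes exactly that many dwords, advance verifies.
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(req, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *	intel_ring_advance(req, cs);
 */
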
static inline u32
intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline u32
intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - req->ring->vaddr;
	GEM_BUG_ON(offset > req->ring->size);
	return intel_ring_wrap(req->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	/* We could combine these into a single tail operation, but keeping
	 * them as separate tests will help identify the cause should one
	 * ever fire.
	 */
	GEM_BUG_ON(!IS_ALIGNED(tail, 8));
	GEM_BUG_ON(tail >= ring->size);

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual
	 * RING_HEAD; the hardware may have advanced it further, but in the
	 * worst case it still equals ring->head, so we must never program
	 * RING_TAIL to advance into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_gem_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline->seqno);
}

int init_workarounds_ring(struct intel_engine_cs *engine);
int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords, i.e. 84 dwords or
 * 336 bytes, for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait,
				   struct drm_i915_gem_request *rq)
{
	wait->tsk = current;
	wait->request = rq;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct drm_i915_gem_request *rq)
{
	return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct drm_i915_gem_request *rq)
{
	return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup);
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

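/*
 * A minimal waiter sketch (illustrative, not from the original source),
 * pairing the helpers above the way the breadcrumbs machinery expects:
 * add the wait, sleep, then always remove it again.
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, rq);
 *	intel_engine_add_wait(engine, &wait);
 *	... sleep; the bottom-half wakes us when the seqno advances ...
 *	intel_engine_remove_wait(engine, &wait);
 */
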
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

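/*
 * A minimal usage sketch (illustrative, not from the original source):
 * appending a flushing PIPE_CONTROL to a workaround batch and keeping
 * the returned, advanced write pointer.
 *
 *	batch = gen8_emit_pipe_control(batch,
 *				       PIPE_CONTROL_FLUSH_L3 |
 *				       PIPE_CONTROL_CS_STALL,
 *				       0);
 */
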
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);

#endif /* _INTEL_RINGBUFFER_H_ */