#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <linux/hashtable.h>
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes; using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

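/*
 * Illustrative only (num_dwords is a placeholder): padding an emission to
 * the next cacheline boundary, as intel_ring_cacheline_align() declared
 * below does with MI_NOOPs, needs
 *
 *	int rem = num_dwords % CACHELINE_DWORDS;
 *	int pad = rem ? CACHELINE_DWORDS - rem : 0;	// MI_NOOPs to emit
 */
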
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
#define gen8_semaphore_seqno_size sizeof(uint64_t)
#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
	(((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
	(dev_priv->semaphore->node.start + \
	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))

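/*
 * Worked example: each engine pair gets one qword slot, indexed
 * (from * I915_NUM_ENGINES + to). For from->id = 0 and to->id = 1 the
 * signal slot sits at byte offset (0 * 5 + 1) * 8 = 0x08 into the
 * semaphore page, and the corresponding wait offset is obtained by
 * swapping from and to -- the transposed layout documented in the
 * GEN8 signal/wait table inside struct intel_engine_cs below.
 */
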
enum intel_engine_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask)

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	int score;
	enum intel_engine_hangcheck_action action;
	int deadlock;
	struct intel_instdone instdone;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct intel_engine_cs *engine;

	struct list_head request_list;

	u32 head;
	u32 tail;
	int space;
	int size;
	int effective_size;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * We use a single page to load ctx workarounds, so all of these
 * values are referred to in terms of dwords.
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, also helpful in case
 *    we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

struct drm_i915_gem_request;

struct intel_engine_cs {
	struct drm_i915_private *i915;
	const char *name;
	enum intel_engine_id {
		RCS = 0,
		BCS,
		VCS,
		VCS2,	/* Keep instances of the same type engine together. */
		VECS
	} id;
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
	unsigned int exec_id;
	enum intel_engine_hw_id {
		RCS_HW = 0,
		VCS_HW,
		BCS_HW,
		VECS_HW,
		VCS2_HW
	} hw_id;
	enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
	u64 fence_context;
	u32 mmio_base;
	unsigned int irq_shift;
	struct intel_ring *buffer;

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
		bool irq_posted;

		spinlock_t lock; /* protects the lists of requests */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct rb_root signals; /* sorted by retirement */
		struct intel_wait *first_wait; /* oldest waiter by retirement */
		struct task_struct *signaler; /* used for fence signalling */
		struct drm_i915_gem_request *first_signal;
		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned long timeout;

		bool irq_enabled : 1;
		bool rpm_wakelock : 1;
	} breadcrumbs;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);
	void (*reset_hw)(struct intel_engine_cs *engine,
			 struct drm_i915_gem_request *req);

	int (*init_context)(struct drm_i915_gem_request *req);

	int (*emit_flush)(struct drm_i915_gem_request *request,
			  u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	int (*emit_request)(struct drm_i915_gem_request *req);

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq safe.
	 */
	void (*submit_request)(struct drm_i915_gem_request *req);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		   VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		u32 sync_seqno[I915_NUM_ENGINES-1];

		union {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
			struct {
				/* our mbox written by others */
				u32 wait[GEN6_NUM_SEMAPHORES];
				/* mboxes this ring signals to */
				i915_reg_t signal[GEN6_NUM_SEMAPHORES];
			} mbox;
			u64 signal_ggtt[I915_NUM_ENGINES];
		};

		/* AKA wait() */
		int (*sync_to)(struct drm_i915_gem_request *req,
			       struct drm_i915_gem_request *signal);
		int (*signal)(struct drm_i915_gem_request *req);
	} semaphore;

	/* Execlists */
	struct tasklet_struct irq_tasklet;
	spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
	struct execlist_port {
		struct drm_i915_gem_request *request;
		unsigned int count;
	} execlist_port[2];
	struct list_head execlist_queue;
	unsigned int fw_domains;
	bool disable_lite_restore_wa;
	bool preempt_wa;
	u32 ctx_desc_template;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Seqno of request most recently submitted to request_list.
	 * Used exclusively by hang checker to avoid grabbing lock while
	 * inspecting request list.
	 */
	u32 last_submitted_seqno;

	/* An RCU guarded pointer to the last request. No reference is
	 * held to the request, users must carefully acquire a reference to
	 * the request using i915_gem_active_get_rcu(), or hold the
	 * struct_mutex.
	 */
	struct i915_gem_active last_request;

	struct i915_gem_context *last_context;

	struct intel_engine_hangcheck hangcheck;

	bool needs_cmd_parser;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_engine_initialized(const struct intel_engine_cs *engine)
{
	return engine->i915 != NULL;
}

static inline unsigned
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return 1 << engine->id;
}

static inline u32
intel_engine_sync_index(struct intel_engine_cs *engine,
			struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs  -> 0 = bcs, 1 = vcs, 2 = vcs2, 3 = vecs;
	 * bcs  -> 0 = vcs, 1 = vcs2, 2 = vecs, 3 = rcs;
	 * vcs  -> 0 = vcs2, 1 = vecs, 2 = rcs, 3 = bcs;
	 * vcs2 -> 0 = vecs, 1 = rcs, 2 = bcs, 3 = vcs;
	 * vecs -> 0 = rcs, 1 = bcs, 2 = vcs, 3 = vcs2;
	 */

	idx = (other - engine) - 1;
	if (idx < 0)
		idx += I915_NUM_ENGINES;

	return idx;
}

static inline void
intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
	mb();
	clflush(&engine->status_page.page_addr[reg]);
	mb();
}

static inline u32
intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine,
			int reg, u32 value)
{
	engine->status_page.page_addr[reg] = value;
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

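/*
 * Worked example: MI_STORE_DWORD_INDEX_SHIFT is 2 (a dword index becomes a
 * byte offset), so I915_GEM_HWS_INDEX_ADDR = 0x30 << 2 = 0xc0 bytes into
 * the status page -- the dword the seqno breadcrumb lands in, as read back
 * by intel_engine_get_seqno() below.
 */
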
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);

int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);

static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
{
	*(uint32_t *)(ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
{
	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
}

static inline void intel_ring_advance(struct intel_ring *ring)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
}

static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	return value & (ring->size - 1);
}

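/*
 * Typical emission pattern (a hedged sketch; real callers emit gen-specific
 * packets):
 *
 *	struct intel_ring *ring = req->ring;
 *	int ret;
 *
 *	ret = intel_ring_begin(req, 4);	// reserve 4 dwords
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 *	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR);
 *	intel_ring_emit(ring, 0);	// value to store
 *	intel_ring_emit(ring, MI_NOOP);	// pad out the reservation
 *	intel_ring_advance(ring);
 */
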
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);

void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_reset_irq(struct intel_engine_cs *engine);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
int intel_engine_create_scratch(struct intel_engine_cs *engine, int size);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

static inline int intel_engine_idle(struct intel_engine_cs *engine,
				    unsigned int flags)
{
	/* Wait upon the last request to be completed */
	return i915_gem_active_wait_unlocked(&engine->last_request,
					     flags, NULL, NULL);
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

int init_workarounds_ring(struct intel_engine_cs *engine);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336

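/*
 * Sanity check of the arithmetic above: (6 + 6 + 72) dwords = 84 dwords,
 * and 84 * sizeof(u32) = 336 bytes = MIN_SPACE_FOR_ADD_REQUEST.
 */
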
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);

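/*
 * A hedged sketch of the waiter flow built on the API above (the real wait
 * loop lives in i915_wait_request()):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);	// may make us the irq bottom-half
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 *			break;
 *		schedule();	// woken by the user interrupt or a timer kick
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 */
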
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
}

static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
{
	bool wakeup = false;

	/* Note that for this not to dangerously chase a dangling pointer,
	 * we must hold the rcu_read_lock here.
	 *
	 * Also note that tsk is likely to be in !TASK_RUNNING state so an
	 * early test for tsk->state != TASK_RUNNING before wake_up_process()
	 * is unlikely to be beneficial.
	 */
	if (intel_engine_has_waiter(engine)) {
		struct task_struct *tsk;

		rcu_read_lock();
		tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
		if (tsk)
			wakeup = wake_up_process(tsk);
		rcu_read_unlock();
	}

	return wakeup;
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
unsigned int intel_kick_waiters(struct drm_i915_private *i915);
unsigned int intel_kick_signalers(struct drm_i915_private *i915);

static inline bool intel_engine_is_active(struct intel_engine_cs *engine)
{
	return i915_gem_active_isset(&engine->last_request);
}

#endif /* _INTEL_RINGBUFFER_H_ */