drivers/gpu/drm/i915/gt/gen8_engine_cs.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_ring.h"

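/*
 * Build PIPE_CONTROL flags from the EMIT_FLUSH / EMIT_INVALIDATE request
 * mode and emit them as a single pipe control, with the Gen9 VF-cache and
 * Kabylake GAM-hang workarounds adding extra pipe controls around it where
 * required.
 */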
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (GRAPHICS_VER(rq->i915) == 9)
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
			dc_flush_wa = true;
	}

	len = 6;

	if (vf_flush_wa)
		len += 6;

	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(rq, cs);

	return 0;
}

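/*
 * Flush for non-render engines: a single MI_FLUSH_DW with a post-sync dword
 * write to the per-context scratch slot, optionally invalidating the TLB
 * (and the BSD cache on video decode engines).
 */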
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(rq, cs);

	return 0;
}

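/*
 * Gen11 render flush: EMIT_FLUSH and EMIT_INVALIDATE are handled as two
 * separate PIPE_CONTROL packets rather than being folded into one.
 */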
int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	if (mode & EMIT_FLUSH) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

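/*
 * Toggle the command streamer pre-parser via MI_ARB_CHECK. On Gen12 this
 * instruction carries a pre-fetch disable field; bit 8 here appears to act
 * as the mask/enable for the state bit carried in bit 0.
 */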
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | 1 << 8 | state;
}

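/*
 * Look up the AUX invalidation register for an engine; engines without one
 * get INVALID_MMIO_REG.
 */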
static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
	switch (engine->id) {
	case RCS0:
		return GEN12_CCS_AUX_INV;
	case BCS0:
		return GEN12_BCS0_AUX_INV;
	case VCS0:
		return GEN12_VD0_AUX_INV;
	case VCS2:
		return GEN12_VD2_AUX_INV;
	case VECS0:
		return GEN12_VE0_AUX_INV;
	case CCS0:
		return GEN12_CCS0_AUX_INV;
	default:
		return INVALID_MMIO_REG;
	}
}

static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
	i915_reg_t reg = gen12_get_aux_inv_reg(engine);

	if (IS_PONTEVECCHIO(engine->i915))
		return false;

	/*
	 * So far platforms supported by i915 having flat ccs do not require
	 * AUX invalidation. Check also whether the engine requires it.
	 */
	return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
}

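/*
 * Invalidate the AUX table from the ring: write AUX_INV to the engine's
 * invalidation register via MI_LOAD_REGISTER_IMM, then poll the same
 * register with MI_SEMAPHORE_WAIT until it reads back as 0, i.e. once the
 * invalidation has completed.
 */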
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
	u32 gsi_offset = engine->gt->uncore->gsi_offset;

	if (!gen12_needs_ccs_aux_inv(engine))
		return cs;

	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = AUX_INV;

	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_REGISTER_POLL |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

static int mtl_dummy_pipe_control(struct i915_request *rq)
{
	/* Wa_14016712196 */
	if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
		u32 *cs;

		/* dummy PIPE_CONTROL + depth flush */
		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);
		cs = gen12_emit_pipe_control(cs,
					     0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

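/*
 * Gen12 render/compute flush. The flush half also runs when the engine
 * needs an AUX table invalidation, since memory traffic has to be quiesced
 * first; the invalidate half wraps its PIPE_CONTROL in pre-parser
 * disable/enable so stale data is not pre-fetched past the TLB invalidate.
 */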
int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	struct intel_engine_cs *engine = rq->engine;

	/*
	 * On Aux CCS platforms the invalidation of the Aux
	 * table requires quiescing memory traffic beforehand
	 */
	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
		u32 bit_group_0 = 0;
		u32 bit_group_1 = 0;
		int err;
		u32 *cs;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;

		/*
		 * When required, in MTL and beyond platforms we
		 * need to set the CCS_FLUSH bit in the pipe control
		 */
		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;

		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/* Wa_1409600907:tgl,adl-p */
		bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
		bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;

		bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
		bit_group_1 |= PIPE_CONTROL_QW_WRITE;

		bit_group_1 |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 flags = 0;
		u32 *cs, count;
		int err;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
		flags |= PIPE_CONTROL_QW_WRITE;

		flags |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		count = 8;
		if (gen12_needs_ccs_aux_inv(rq->engine))
			count += 8;

		cs = intel_ring_begin(rq, count);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/*
		 * Prevent the pre-parser from skipping past the TLB
		 * invalidate and loading a stale page for the batch
		 * buffer / request payload.
		 */
		*cs++ = preparser_disable(true);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

		cs = gen12_emit_aux_table_inv(engine, cs);

		*cs++ = preparser_disable(false);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

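/*
 * Gen12 flush for non-render engines: MI_FLUSH_DW plus, on invalidation,
 * pre-parser disable/enable around the flush and an AUX table invalidation
 * where the engine needs one.
 */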
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd = 4;
	u32 *cs;

	if (mode & EMIT_INVALIDATE) {
		cmd += 2;

		if (gen12_needs_ccs_aux_inv(rq->engine))
			cmd += 8;
	}

	cs = intel_ring_begin(rq, cmd);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(true);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;

		if (gen12_needs_ccs_aux_inv(rq->engine) &&
		    rq->engine->class == COPY_ENGINE_CLASS)
			cmd |= MI_FLUSH_DW_CCS;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */

	cs = gen12_emit_aux_table_inv(rq->engine, cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(false);

	intel_ring_advance(rq, cs);

	return 0;
}

static u32 preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}

static u32 hwsp_offset(const struct i915_request *rq)
{
	const struct intel_timeline *tl;

	/* Before the request is executed, the timeline is fixed */
	tl = rcu_dereference_protected(rq->timeline,
				       !i915_request_signaled(rq));

	/* See the comment in i915_request_active_seqno(). */
	return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}

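/*
 * Write the initial breadcrumb (seqno - 1) for a request and mark the point
 * at which i915_request_started() begins to report true; see the comment in
 * the body for how this interacts with preemption and hang banning.
 */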
int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
		return 0;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = hwsp_offset(rq);
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 *
	 * i915_request_started() is used during preemption processing
	 * to decide if the request is currently inside the user payload
	 * or spinning on a kernel semaphore (or earlier). For no-preemption
	 * requests, we do allow preemption on the semaphore before the user
	 * payload, but do not allow preemption once the request is started.
	 *
	 * i915_request_started() is similarly used during GPU hangs to
	 * determine if the user's payload was guilty, and if so, the
	 * request is banned. Before the request is started, it is assumed
	 * to be unharmed and an innocent victim of another's hang.
	 */
	*cs++ = MI_NOOP;
	*cs++ = MI_ARB_CHECK;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);

	return 0;
}

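/*
 * Xe_HP batch buffer start: restore RING_PREDICATE_RESULT from the
 * context's indirect workaround page before jumping to the batch, and
 * afterwards branch into the workaround batch that fixes up a stray
 * MI_SET_PREDICATE (see the comments below).
 */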
static int __xehp_emit_bb_start(struct i915_request *rq,
				u64 offset, u32 len,
				const unsigned int flags,
				u32 arb)
{
	struct intel_context *ce = rq->context;
	u32 wa_offset = lrc_indirect_bb(ce);
	u32 *cs;

	GEM_BUG_ON(!ce->wa_bb_page);

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | arb;

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	/* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
	*cs++ = MI_BATCH_BUFFER_START_GEN8;
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
	*cs++ = 0;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	intel_ring_advance(rq, cs);

	return 0;
}

int xehp_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
}

int xehp_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
}

int gen8_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gen that do not need the w/a at all!), if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) that arbitration was enabled
	 * we would be fine. However, for gen8 there is another w/a that
	 * requires us to not preempt inside GPGPU execution, so we keep
	 * arbitration disabled for gen8 batches. Arbitration will be
	 * re-enabled before we close the request
	 * (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}

int gen8_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	u32 *cs;

	if (unlikely(i915_request_has_nopreempt(rq)))
		return gen8_emit_bb_start_noarb(rq, offset, len, flags);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static void assert_request_valid(struct i915_request *rq)
{
	struct intel_ring *ring __maybe_unused = rq->ring;

	/* Can we unwind this request without appearing to go forwards? */
	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	rq->wa_tail = intel_ring_offset(rq, cs);

	/* Check that entire request is less than half the ring */
	assert_request_valid(rq);

	return cs;
}

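/*
 * Busy-wait on the engine's preemption semaphore in the status page
 * (preempt_address()); the leading MI_ARB_CHECK provides a preemption
 * point before the wait itself.
 */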
static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	return cs;
}

static __always_inline u32*
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = emit_preempt_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}

u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}

u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_TILE_CACHE_FLUSH |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/*XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

/*
 * Note that the CS instruction pre-parser will not stall on the breadcrumb
 * flush and will continue pre-fetching the instructions after it before the
 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
 * of the next request before the memory has been flushed, we're guaranteed that
 * we won't access the batch itself too early.
 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
 * so, if the current request is modifying an instruction in the next request on
 * the same intel_context, we might pre-fetch and then execute the pre-update
 * instruction. To avoid this, the users of self-modifying code should either
 * disable the parser around the code emitting the memory writes, via a new flag
 * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
 * the in-kernel use-cases we've opted to use a separate context, see
 * reloc_gpu() as an example.
 * All the above applies only to the instructions themselves. Non-inline data
 * used by the instructions is not pre-fetched.
 */

static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

/* Wa_14014475959:dg2 */
#define CCS_SEMAPHORE_PPHWSP_OFFSET 0x540
static u32 ccs_semaphore_offset(struct i915_request *rq)
{
	return i915_ggtt_offset(rq->context->state) +
		(LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
}

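/*
 * Wa_14014475959 busywait: atomically write 1 to a semaphore slot in the
 * context's PPHWSP, then poll it with MI_SEMAPHORE_WAIT until it is cleared
 * back to 0 (the "hold CCS switchout" handshake; see
 * intel_engine_uses_wa_hold_ccs_switchout()).
 */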
/* Wa_14014475959:dg2 */
static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
	int i;

	*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
		MI_ATOMIC_MOVE;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;
	*cs++ = 1;

	/*
	 * When MI_ATOMIC_INLINE_DATA set this command must be 11 DW + (1 NOP)
	 * to align. 4 DWs above + 8 filler DWs here.
	 */
	for (i = 0; i < 8; ++i)
		*cs++ = 0;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;

	return cs;
}

static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = gen12_emit_preempt_busywait(rq, cs);

	/* Wa_14014475959:dg2 */
	if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
		cs = ccs_emit_wa_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	/* XXX Stalling flush before seqno write; post-sync not */
	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	struct drm_i915_private *i915 = rq->i915;
	u32 flags = (PIPE_CONTROL_CS_STALL |
		     PIPE_CONTROL_TLB_INVALIDATE |
		     PIPE_CONTROL_TILE_CACHE_FLUSH |
		     PIPE_CONTROL_FLUSH_L3 |
		     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		     PIPE_CONTROL_DC_FLUSH_ENABLE |
		     PIPE_CONTROL_FLUSH_ENABLE);

	/* Wa_14016712196 */
	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
		/* dummy PIPE_CONTROL + depth flush */
		cs = gen12_emit_pipe_control(cs, 0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		/* Wa_1409600907 */
		flags |= PIPE_CONTROL_DEPTH_STALL;

	if (!HAS_3D_PIPELINE(rq->i915))
		flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (rq->engine->class == COMPUTE_CLASS)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	cs = gen12_emit_pipe_control(cs, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0);

	/*XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen12_emit_ggtt_write_rcs(cs,
				       rq->fence.seqno,
				       hwsp_offset(rq),
				       0,
				       PIPE_CONTROL_FLUSH_ENABLE |
				       PIPE_CONTROL_CS_STALL);

	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}