/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "i915_gem_render_state.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_workarounds.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static inline u32
intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_INDEX_ADDR);
}

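/*
 * Recompute the amount of free space in the ring from the current
 * HEAD and emit offsets, and cache the result in ring->space.
 */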
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	unsigned int num_store_dw;
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	num_store_dw = 0;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;
	if (mode & EMIT_FLUSH)
		num_store_dw = 4;

	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_scratch_offset(rq->i915);
		*cs++ = 0;
	}
	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that it is a delay following invalidation
	 * that is required to reset the caches as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = cmd;

	intel_ring_advance(rq, cs);

	return 0;
}

/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = gen6_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;

	/* Finally we can flush and with it emit the breadcrumb */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	*cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = rq->global_seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_GLOBAL_GTT_IVB |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = rq->timeline->hwsp_offset;
	*cs++ = rq->fence.seqno;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_GLOBAL_GTT_IVB |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = intel_hws_seqno_address(rq->engine);
	*cs++ = rq->global_seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->global_seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN7_XCS_WA 32
static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->global_seqno;

	for (i = 0; i < GEN7_XCS_WA; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN7_XCS_WA

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (phys >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(dev_priv, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(dev_priv, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(hwsp, offset);
	POSTING_READ(hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t instpm = RING_INSTPM(engine->mmio_base);

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

	I915_WRITE(instpm,
		   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
				      INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(dev_priv,
				    instpm, INSTPM_SYNC_FLUSH, 0,
				    1000))
		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			  engine->name);
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

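/*
 * Ask the command streamer to stop: set STOP_RING, wait for the ring
 * to report idle, then clear HEAD/TAIL/CTL. Returns true if the ring
 * ended up empty with HEAD at zero.
 */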
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));

	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_CTL(engine, 0);

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				 "ctl %08x head %08x tail %08x start %08x\n",
				 engine->name,
				 I915_READ_CTL(engine),
				 I915_READ_HEAD(engine),
				 I915_READ_TAIL(engine),
				 I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
				 engine->name, I915_READ_HEAD(engine));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	/* First wake the ring up to an empty/idle ring */
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->head);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		I915_WRITE_TAIL(engine, ring->tail);
		(void)I915_READ_TAIL(engine);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_queue_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	intel_engine_stop_cs(engine);
}

static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_timeline *tl = &engine->timeline;
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&tl->lock, flags);
	list_for_each_entry(pos, &tl->requests, link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
		  engine->name,
		  rq ? rq->global_seqno : 0,
		  intel_engine_get_seqno(engine),
		  yesno(stalled));
	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		i915_reset_request(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->buffer);
		head = rq->head;
	} else {
		head = engine->buffer->tail;
	}
	engine->buffer->head = intel_ring_wrap(engine->buffer, head);

	spin_unlock_irqrestore(&tl->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static int intel_rcs_ctx_init(struct i915_request *rq)
{
	int ret;

	ret = intel_engine_emit_ctx_wa(rq);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_emit(rq);
	if (ret)
		return ret;

	return 0;
}

*engine
)
837 struct drm_i915_private
*dev_priv
= engine
->i915
;
838 int ret
= init_ring_common(engine
);
842 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
843 if (IS_GEN_RANGE(dev_priv
, 4, 6))
844 I915_WRITE(MI_MODE
, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH
));
846 /* We need to disable the AsyncFlip performance optimisations in order
847 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
848 * programmed to '1' on all products.
850 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
852 if (IS_GEN_RANGE(dev_priv
, 6, 7))
853 I915_WRITE(MI_MODE
, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE
));
855 /* Required for the hardware to program scanline values for waiting */
856 /* WaEnableFlushTlbInvalidationMode:snb */
857 if (IS_GEN(dev_priv
, 6))
859 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT
));
861 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
862 if (IS_GEN(dev_priv
, 7))
863 I915_WRITE(GFX_MODE_GEN7
,
864 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT
) |
865 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE
));
867 if (IS_GEN(dev_priv
, 6)) {
868 /* From the Sandybridge PRM, volume 1 part 3, page 24:
869 * "If this bit is set, STCunit will have LRA as replacement
870 * policy. [...] This bit must be reset. LRA replacement
871 * policy is not supported."
873 I915_WRITE(CACHE_MODE_0
,
874 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB
));
877 if (IS_GEN_RANGE(dev_priv
, 6, 7))
878 I915_WRITE(INSTPM
, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING
));
880 if (INTEL_GEN(dev_priv
) >= 6)
881 I915_WRITE_IMR(engine
, ~engine
->irq_keep_mask
);
static void cancel_requests(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->timeline.requests, link) {
		GEM_BUG_ON(!request->global_seqno);

		if (!i915_request_signaled(request))
			dma_fence_set_error(&request->fence, -EIO);

		i915_request_mark_complete(request);
	}

	intel_write_status_page(engine,
				I915_GEM_HWS_INDEX,
				intel_engine_last_submit(engine));

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_request_submit(request);

	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));
}

static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX_ADDR;
	*cs++ = rq->global_seqno;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN5_WA_STORES 8 /* must be at least 1! */
static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
	*cs++ = rq->fence.seqno;

	BUILD_BUG_ON(GEN5_WA_STORES < 1);
	for (i = 0; i < GEN5_WA_STORES; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_INDEX_ADDR;
		*cs++ = rq->global_seqno;
	}

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN5_WA_STORES

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}

static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	POSTING_READ_FW(RING_IMR(engine->mmio_base));

	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	POSTING_READ_FW(RING_IMR(engine->mmio_base));

	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static int
i965_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)

static int
i830_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);

	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(rq, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static int
i915_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

int intel_ring_pin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;
	enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
	unsigned int flags;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	ret = i915_timeline_pin(ring->timeline);
	if (ret)
		return ret;

	flags = PIN_GLOBAL;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			goto unpin_timeline;
	}

	ret = i915_vma_pin(vma, 0, 0, flags);
	if (unlikely(ret))
		goto unpin_timeline;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto unpin_ring;
	}

	vma->obj->pin_global++;

	ring->vaddr = addr;
	return 0;

unpin_ring:
	i915_vma_unpin(vma);
unpin_timeline:
	i915_timeline_unpin(ring->timeline);
	return ret;
}

void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->tail);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	ring->vma->obj->pin_global--;
	i915_vma_unpin(ring->vma);

	i915_timeline_unpin(ring->timeline);
}

static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (!obj)
		obj = i915_gem_object_create_internal(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
	GEM_BUG_ON(timeline == &engine->timeline);
	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ring->request_list);
	ring->timeline = i915_timeline_get(timeline);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

void
intel_ring_free(struct intel_ring *ring)
{
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

	i915_timeline_put(ring->timeline);
	kfree(ring);
}

static void intel_ring_context_destroy(struct intel_context *ce)
{
	GEM_BUG_ON(ce->pin_count);

	if (!ce->state)
		return;

	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
	i915_gem_object_put(ce->state->obj);
}

static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int err = 0;

	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
	if (ppgtt)
		err = gen6_ppgtt_pin(ppgtt);

	return err;
}

static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
	if (ppgtt)
		gen6_ppgtt_unpin(ppgtt);
}

static int __context_pin(struct intel_context *ce)
{
	struct i915_vma *vma;
	int err;

	vma = ce->state;
	if (!vma)
		return 0;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	vma->obj->pin_global++;

	return 0;
}

static void __context_unpin(struct intel_context *ce)
{
	struct i915_vma *vma;

	vma = ce->state;
	if (!vma)
		return;

	vma->obj->pin_global--;
	i915_vma_unpin(vma);
}

static void intel_ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce->gem_context);
	__context_unpin(ce);

	i915_gem_context_put(ce->gem_context);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (engine->default_state) {
		void *defaults, *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			err = PTR_ERR(defaults);
			goto err_map;
		}

		memcpy(vaddr, defaults, engine->context_size);

		i915_gem_object_unpin_map(engine->default_state);
		i915_gem_object_unpin_map(obj);
	}

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915)) {
		/* Ignore any error, regard it as a simple optimisation */
		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static struct intel_context *
__ring_context_pin(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx,
		   struct intel_context *ce)
{
	int err;

	if (!ce->state && engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err;
		}

		ce->state = vma;
	}

	err = __context_pin(ce);
	if (err)
		goto err;

	err = __context_pin_ppgtt(ce->gem_context);
	if (err)
		goto err_unpin;

	i915_gem_context_get(ctx);

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->buffer);
	ce->ring = engine->buffer;

	return ce;

err_unpin:
	__context_unpin(ce);
err:
	ce->pin_count = 0;
	return ERR_PTR(err);
}

static const struct intel_context_ops ring_context_ops = {
	.unpin = intel_ring_context_unpin,
	.destroy = intel_ring_context_destroy,
};

static struct intel_context *
intel_ring_context_pin(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct intel_context *ce = to_intel_context(ctx, engine);

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (likely(ce->pin_count++))
		return ce;
	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */

	ce->ops = &ring_context_ops;

	return __ring_context_pin(engine, ctx, ce);
}

static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct i915_timeline *timeline;
	struct intel_ring *ring;
	int err;

	err = intel_engine_setup_common(engine);
	if (err)
		return err;

	timeline = i915_timeline_create(engine->i915,
					engine->name,
					engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
	i915_timeline_put(timeline);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
	engine->buffer = ring;

	err = intel_engine_init_common(engine);
	if (err)
		goto err_unpin;

	GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);

	return 0;

err_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_free(ring);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);

	intel_ring_unpin(engine->buffer);
	intel_ring_free(engine->buffer);

	if (engine->cleanup)
		engine->cleanup(engine);

	intel_engine_cleanup_common(engine);

	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Restart from the beginning of the rings for convenience */
	for_each_engine(engine, dev_priv, id)
		intel_ring_reset(engine->buffer, 0);
}

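/*
 * Emit LRIs to point the engine's PP_DIR_DCLV/PP_DIR_BASE registers at
 * the given ppgtt's page directory, switching the GPU onto those page
 * tables from within the request stream.
 */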
static int load_pd_dir(struct i915_request *rq,
		       const struct i915_hw_ppgtt *ppgtt)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = ppgtt->pd.base.ggtt_offset << 10;

	intel_ring_advance(rq, cs);

	return 0;
}

static int flush_pd_dir(struct i915_request *rq)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Stall until the page table load is complete */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = i915_scratch_offset(rq->i915);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);
	return 0;
}

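/*
 * Emit the MI_SET_CONTEXT sequence for a legacy (ringbuffer) context
 * switch, including the gen7 MI_ARB and PSMI sleep-message workarounds
 * that must bracket the actual context load.
 */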
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_rings =
		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(i915))
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
							  engine)->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_scratch_offset(rq->i915);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *ctx = rq->gem_context;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
	unsigned int unwind_mm = 0;
	u32 hw_flags = 0;
	int ret, i;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (ppgtt) {
		int loops;

		/*
		 * Baytail takes a little more convincing that it really needs
		 * to reload the PD between contexts. It is not just a little
		 * longer, as adding more stalls after the load_pd_dir (i.e.
		 * adding a long loop around flush_pd_dir) is not as effective
		 * as reloading the PD umpteen times. 32 is derived from
		 * experimentation (gem_exec_parallel/fds) and has no good
		 * explanation.
		 */
		loops = 1;
		if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
			loops = 32;

		do {
			ret = load_pd_dir(rq, ppgtt);
			if (ret)
				goto err;
		} while (--loops);

		if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
			unwind_mm = intel_engine_flag(engine);
			ppgtt->pd_dirty_rings &= ~unwind_mm;
			hw_flags = MI_FORCE_RESTORE;
		}
	}

	if (rq->hw_context->state) {
		GEM_BUG_ON(engine->id != RCS);

		/*
		 * The kernel context(s) is treated as pure scratch and is not
		 * expected to retain any state (as we sacrifice it during
		 * suspend and on resume it may be corrupted). This is ok,
		 * as nothing actually executes using the kernel context; it
		 * is purely used for flushing user contexts.
		 */
		if (i915_gem_context_is_kernel(ctx))
			hw_flags = MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, hw_flags);
		if (ret)
			goto err_mm;
	}

	if (ppgtt) {
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			goto err_mm;

		ret = flush_pd_dir(rq);
		if (ret)
			goto err_mm;

		/*
		 * Not only do we need a full barrier (post-sync write) after
		 * invalidating the TLBs, but we need to wait a little bit
		 * longer. Whether this is merely delaying us, or the
		 * subsequent flush is a key part of serialising with the
		 * post-sync op, this extra pass appears vital before a
		 * mm switch!
		 */
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			goto err_mm;

		ret = engine->emit_flush(rq, EMIT_FLUSH);
		if (ret)
			goto err_mm;
	}

	if (ctx->remap_slice) {
		for (i = 0; i < MAX_L3_SLICES; i++) {
			if (!(ctx->remap_slice & BIT(i)))
				continue;

			ret = remap_l3(rq, i);
			if (ret)
				goto err_mm;
		}

		ctx->remap_slice = 0;
	}

	return 0;

err_mm:
	if (unwind_mm)
		ppgtt->pd_dirty_rings |= unwind_mm;
err:
	return ret;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!request->hw_context->pin_count);
	GEM_BUG_ON(request->timeline->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	ret = switch_context(request);
	if (ret)
		return ret;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&ring->request_list));
	list_for_each_entry(target, &ring->request_list, ring_link) {
		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring, total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	cmd |= flags;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}

static int
hsw_emit_bb_start(struct i915_request *rq,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}

static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);

	intel_ring_init_irq(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset.prepare = reset_prepare;
	engine->reset.reset = reset_ring;
	engine->reset.finish = reset_finish;

	engine->context_pin = intel_ring_context_pin;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
	if (IS_GEN(dev_priv, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 7) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
	} else if (IS_GEN(dev_priv, 6)) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen6_render_ring_flush;
		engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
	} else if (IS_GEN(dev_priv, 5)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	return 0;
}

int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(dev_priv, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(dev_priv, 6))
			engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
		else
			engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN(dev_priv, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}

int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(dev_priv, 6))
		engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
	else
		engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;

	return intel_init_ring_buffer(engine);
}