/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */
#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200
static unsigned int __intel_ring_space(unsigned int head,
				       unsigned int tail,
				       unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}
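
/*
 * Worked example for the arithmetic above (illustrative numbers, not taken
 * from the driver): with a 4 KiB ring (size = 4096, mask = 4095),
 * CACHELINE_BYTES = 64, head = 128 and tail = 192, the result is
 * (128 - 192 - 64) & 4095 = 3968: the whole ring minus the 64 bytes still
 * in flight and one cacheline of slack, so head and tail can never end up
 * sharing a cacheline.
 */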
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	unsigned int num_store_dw;
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	num_store_dw = 0;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;
	if (mode & EMIT_FLUSH)
		num_store_dw = 4;

	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_scratch_offset(rq->i915);
		*cs++ = 0;
	}
	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}
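
/*
 * Illustrative sketch (not part of the driver): every emitter in this file
 * follows the same begin/write/advance pattern seen above. A hypothetical
 * helper that pads a request with 2*n MI_NOOPs might look like this;
 * "pad_request" is an invented name, while intel_ring_begin() and
 * intel_ring_advance() are the real calls:
 *
 *	static int pad_request(struct i915_request *rq, unsigned int n)
 *	{
 *		u32 *cs;
 *
 *		cs = intel_ring_begin(rq, 2 * n); // dword count must be even
 *		if (IS_ERR(cs))
 *			return PTR_ERR(cs);
 *		while (n--) {
 *			*cs++ = MI_NOOP;
 *			*cs++ = MI_NOOP;
 *		}
 *		intel_ring_advance(rq, cs); // asserts we wrote what we reserved
 *		return 0;
 *	}
 */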
static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that there is a delay following invalidation
	 * that is required to reset the caches, as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = MI_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}
/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct page *page = virt_to_page(engine->status_page.page_addr);
	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (phys >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}
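
/*
 * Worked example for the gen4+ branch above (made-up address): for
 * phys = 0x2_3456_7000, addr = lower_32_bits(phys) = 0x34567000 and
 * (phys >> 28) & 0xf0 = 0x20, i.e. bits 35:32 of the physical address are
 * folded into bits 7:4 of HWS_PGA, which are otherwise unused because a
 * page-aligned address leaves bits 11:0 clear.
 */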
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 mask = ~0u;

		/*
		 * Keep the render interrupt unmasked as this papers over
		 * lost interrupts following a reset.
		 */
		if (engine->id == RCS)
			mask &= ~BIT(0);

		I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/* Flush the TLB for this page */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));

	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_CTL(engine, 0);

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
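
/*
 * Aside on the _MASKED_BIT_*() writes used above (a sketch of the i915
 * masked-register idiom; this describes the helper macros, which are not
 * defined in this file, so treat the expansion as an assumption): such
 * registers carry an enable mask in their high 16 bits, and only bits whose
 * mask is set are modified by a write. _MASKED_BIT_ENABLE(STOP_RING)
 * expands to (STOP_RING << 16) | STOP_RING, and _MASKED_BIT_DISABLE(STOP_RING)
 * to STOP_RING << 16, so each write updates STOP_RING without disturbing
 * the other MI_MODE bits.
 */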
static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				 "ctl %08x head %08x tail %08x start %08x\n",
				 engine->name,
				 I915_READ_CTL(engine),
				 I915_READ_HEAD(engine),
				 I915_READ_TAIL(engine),
				 I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
				 engine->name, I915_READ_HEAD(engine));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	/* First wake the ring up to an empty/idle ring */
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->head);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		I915_WRITE_TAIL(engine, ring->tail);
		(void)I915_READ_TAIL(engine);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_wakeup(engine);
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
	intel_engine_stop_cs(engine);

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	return i915_gem_find_active_request(engine);
}
static void skip_request(struct i915_request *rq)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	head = rq->infix;
	if (rq->postfix < head) {
		memset32(vaddr + head, MI_NOOP,
			 (rq->ring->size - head) / sizeof(u32));
		head = 0;
	}
	memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
}
static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
	GEM_TRACE("%s request global=%d, current=%d\n",
		  engine->name, rq ? rq->global_seqno : 0,
		  intel_engine_get_seqno(engine));

	/*
	 * Try to restore the logical GPU state to match the continuation
	 * of the request queue. If we skip the context/PD restore, then
	 * the next request may try to execute assuming that its context
	 * is valid and loaded on the GPU and so may try to access invalid
	 * memory, prompting repeated GPU hangs.
	 *
	 * If the request was guilty, we still restore the logical state
	 * in case the next request requires it (e.g. the aliasing ppgtt),
	 * but skip over the hung batch.
	 *
	 * If the request was innocent, we try to replay the request with
	 * the restored context.
	 */
	if (rq) {
		/* If the rq hung, jump to its breadcrumb and skip the batch */
		rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
		if (rq->fence.error == -EIO)
			skip_request(rq);
	}
}
static void reset_finish(struct intel_engine_cs *engine)
{
}
static int intel_rcs_ctx_init(struct i915_request *rq)
{
	int ret;

	ret = intel_engine_emit_ctx_wa(rq);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_emit(rq);
	if (ret)
		return ret;

	return 0;
}
static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_GEN(dev_priv) >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return 0;
}
static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
{
	struct drm_i915_private *dev_priv = rq->i915;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int num_rings = 0;

	for_each_engine(engine, dev_priv, id) {
		i915_reg_t mbox_reg;

		if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
			continue;

		mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
		if (i915_mmio_reg_valid(mbox_reg)) {
			*cs++ = MI_LOAD_REGISTER_IMM(1);
			*cs++ = i915_mmio_reg_offset(mbox_reg);
			*cs++ = rq->global_seqno;
			num_rings++;
		}
	}
	if (num_rings & 1)
		*cs++ = MI_NOOP;

	return cs;
}
static void cancel_requests(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->timeline.requests, link) {
		GEM_BUG_ON(!request->global_seqno);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			     &request->fence.flags))
			continue;

		dma_fence_set_error(&request->fence, -EIO);
	}

	intel_write_status_page(engine,
				I915_GEM_HWS_INDEX,
				intel_engine_last_submit(engine));

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_request_submit(request);

	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));
}
static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
	*cs++ = rq->global_seqno;
	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
}

static const int i9xx_emit_breadcrumb_sz = 4;
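
/*
 * Note (added for clarity): i9xx_emit_breadcrumb_sz must equal the number of
 * dwords written by i9xx_emit_breadcrumb() above (here 4: the
 * MI_STORE_DWORD_INDEX header, the HWS index, the seqno and
 * MI_USER_INTERRUPT), since the core reserves exactly this many dwords for
 * closing out a request.
 */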
static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
}
static int
gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
	u32 *cs;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = dw1 | wait_mbox;
	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	*cs++ = signal->global_seqno - 1;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
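
/*
 * Worked example of the "- 1" above (illustrative numbers): to wait for
 * completion of seqno 42 we emit a compare operand of 41, because the MBOX
 * semaphore releases only when the signaller's register is strictly greater
 * than the operand, i.e. as soon as it reaches 42.
 */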
static void
gen5_seqno_barrier(struct intel_engine_cs *engine)
{
	/* MI_STORE are internally buffered by the GPU and not flushed
	 * either by MI_FLUSH or SyncFlush or any other combination of
	 * MI commands.
	 *
	 * "Only the submission of the store operation is guaranteed.
	 * The write result will be complete (coherent) some time later
	 * (this is practically a finite period but there is no guaranteed
	 * latency)."
	 *
	 * Empirically, we observe that we need a delay of at least 75us to
	 * be sure that the seqno write is visible by the CPU.
	 */
	usleep_range(125, 250);
}
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 *
	 * Note that this effectively stalls the read by the time it takes to
	 * do a memory transaction, which more or less ensures that the write
	 * from the GPU has sufficient time to invalidate the CPU cacheline.
	 * Alternatively we could delay the interrupt from the CS ring to give
	 * the write time to land, but that would incur a delay after every
	 * batch i.e. much more frequent than a delay when waiting for the
	 * interrupt (with the same net latency).
	 *
	 * Also note that to prevent whole machine hangs on gen7, we have to
	 * take the spinlock to guard against concurrent cacheline access.
	 */
	spin_lock_irq(&dev_priv->uncore.lock);
	POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
	spin_unlock_irq(&dev_priv->uncore.lock);
}
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}
static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));
	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static int
i965_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
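
/*
 * With the values above, I830_WA_SIZE = max(2 * 4096, SZ_256K) = 256 KiB:
 * the scratch bo must be able to hold the largest batch we are willing to
 * copy, not merely the two TLB-eviction pages.
 */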
static int
i830_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);

	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(rq, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}
static int
i915_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}
int intel_ring_pin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;
	enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
	unsigned int flags;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	flags = PIN_GLOBAL;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
			ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		else
			ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
		if (unlikely(ret))
			return ret;
	}

	ret = i915_vma_pin(vma, 0, 0, flags);
	if (unlikely(ret))
		return ret;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr))
		goto err;

	vma->obj->pin_global++;

	ring->vaddr = addr;
	return 0;

err:
	i915_vma_unpin(vma);
	return PTR_ERR(addr);
}
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}
void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->tail);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	ring->vma->obj->pin_global--;
	i915_vma_unpin(ring->vma);
}
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (!obj)
		obj = i915_gem_object_create_internal(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
	GEM_BUG_ON(timeline == &engine->timeline);
	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ring->request_list);
	ring->timeline = i915_timeline_get(timeline);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}
void
intel_ring_free(struct intel_ring *ring)
{
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

	i915_timeline_put(ring->timeline);
	kfree(ring);
}
static void intel_ring_context_destroy(struct intel_context *ce)
{
	GEM_BUG_ON(ce->pin_count);

	if (!ce->state)
		return;

	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
	i915_gem_object_put(ce->state->obj);
}
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int err = 0;

	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
	if (ppgtt)
		err = gen6_ppgtt_pin(ppgtt);

	return err;
}

static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
	if (ppgtt)
		gen6_ppgtt_unpin(ppgtt);
}
static int __context_pin(struct intel_context *ce)
{
	struct i915_vma *vma;
	int err;

	vma = ce->state;
	if (!vma)
		return 0;

	/*
	 * Clear this page out of any CPU caches for coherent swap-in/out.
	 * We only want to do this on the first bind so that we do not stall
	 * on an active context (which by nature is already on the GPU).
	 */
	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
		err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
		if (err)
			return err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	vma->obj->pin_global++;

	return 0;
}

static void __context_unpin(struct intel_context *ce)
{
	struct i915_vma *vma;

	vma = ce->state;
	if (!vma)
		return;

	vma->obj->pin_global--;
	i915_vma_unpin(vma);
}
static void intel_ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce->gem_context);
	__context_unpin(ce);

	i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (engine->default_state) {
		void *defaults, *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			err = PTR_ERR(defaults);
			goto err_map;
		}

		memcpy(vaddr, defaults, engine->context_size);

		i915_gem_object_unpin_map(engine->default_state);
		i915_gem_object_unpin_map(obj);
	}

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915)) {
		/* Ignore any error, regard it as a simple optimisation */
		i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
static struct intel_context *
__ring_context_pin(struct intel_engine_cs *engine,
		   struct i915_gem_context *ctx,
		   struct intel_context *ce)
{
	int err;

	if (!ce->state && engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err;
		}

		ce->state = vma;
	}

	err = __context_pin(ce);
	if (err)
		goto err;

	err = __context_pin_ppgtt(ce->gem_context);
	if (err)
		goto err_unpin;

	i915_gem_context_get(ctx);

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->buffer);
	ce->ring = engine->buffer;

	return ce;

err_unpin:
	__context_unpin(ce);
err:
	ce->pin_count = 0;
	return ERR_PTR(err);
}
static const struct intel_context_ops ring_context_ops = {
	.unpin = intel_ring_context_unpin,
	.destroy = intel_ring_context_destroy,
};
static struct intel_context *
intel_ring_context_pin(struct intel_engine_cs *engine,
		       struct i915_gem_context *ctx)
{
	struct intel_context *ce = to_intel_context(ctx, engine);

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (likely(ce->pin_count++))
		return ce;
	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */

	ce->ops = &ring_context_ops;

	return __ring_context_pin(engine, ctx, ce);
}
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct i915_timeline *timeline;
	struct intel_ring *ring;
	int err;

	intel_engine_setup_common(engine);

	timeline = i915_timeline_create(engine->i915, engine->name);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}

	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
	i915_timeline_put(timeline);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
	engine->buffer = ring;

	err = intel_engine_init_common(engine);
	if (err)
		goto err_unpin;

	return 0;

err_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_free(ring);
err:
	intel_engine_cleanup_common(engine);
	return err;
}
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);

	intel_ring_unpin(engine->buffer);
	intel_ring_free(engine->buffer);

	if (engine->cleanup)
		engine->cleanup(engine);

	intel_engine_cleanup_common(engine);

	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Restart from the beginning of the rings for convenience */
	for_each_engine(engine, dev_priv, id)
		intel_ring_reset(engine->buffer, 0);
}
static int load_pd_dir(struct i915_request *rq,
		       const struct i915_hw_ppgtt *ppgtt)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = ppgtt->pd.base.ggtt_offset << 10;

	intel_ring_advance(rq, cs);

	return 0;
}
static int flush_pd_dir(struct i915_request *rq)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Stall until the page table load is complete */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = i915_scratch_offset(rq->i915);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);
	return 0;
}
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_rings =
		/* Use an extended w/a on gen7 if signalling from other rings */
		(HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
		INTEL_INFO(i915)->num_rings - 1 :
		0;
	bool force_restore = false;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(i915))
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (IS_GEN7(i915))
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN7(i915)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
							  engine)->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN7(i915)) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_scratch_offset(rq->i915);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
static int remap_l3(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *ctx = rq->gem_context;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
	unsigned int unwind_mm = 0;
	u32 hw_flags = 0;
	int ret, i;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (ppgtt) {
		int loops;

		/*
		 * Baytail takes a little more convincing that it really needs
		 * to reload the PD between contexts. It is not just a little
		 * longer, as adding more stalls after the load_pd_dir (i.e.
		 * adding a long loop around flush_pd_dir) is not as effective
		 * as reloading the PD umpteen times. 32 is derived from
		 * experimentation (gem_exec_parallel/fds) and has no good
		 * explanation.
		 */
		loops = 1;
		if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
			loops = 32;

		do {
			ret = load_pd_dir(rq, ppgtt);
			if (ret)
				goto err;
		} while (--loops);

		if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
			unwind_mm = intel_engine_flag(engine);
			ppgtt->pd_dirty_rings &= ~unwind_mm;
			hw_flags = MI_FORCE_RESTORE;
		}
	}

	if (rq->hw_context->state) {
		GEM_BUG_ON(engine->id != RCS);

		/*
		 * The kernel context(s) is treated as pure scratch and is not
		 * expected to retain any state (as we sacrifice it during
		 * suspend and on resume it may be corrupted). This is ok,
		 * as nothing actually executes using the kernel context; it
		 * is purely used for flushing user contexts.
		 */
		if (i915_gem_context_is_kernel(ctx))
			hw_flags = MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, hw_flags);
		if (ret)
			goto err_mm;
	}

	if (ppgtt) {
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			goto err_mm;

		ret = flush_pd_dir(rq);
		if (ret)
			goto err_mm;

		/*
		 * Not only do we need a full barrier (post-sync write) after
		 * invalidating the TLBs, but we need to wait a little bit
		 * longer. Whether this is merely delaying us, or the
		 * subsequent flush is a key part of serialising with the
		 * post-sync op, this extra pass appears vital before a
		 * reload of the page tables.
		 */
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			goto err_mm;

		ret = engine->emit_flush(rq, EMIT_FLUSH);
		if (ret)
			goto err_mm;
	}

	if (ctx->remap_slice) {
		for (i = 0; i < MAX_L3_SLICES; i++) {
			if (!(ctx->remap_slice & BIT(i)))
				continue;

			ret = remap_l3(rq, i);
			if (ret)
				goto err_mm;
		}

		ctx->remap_slice = 0;
	}

	return 0;

err_mm:
	if (unwind_mm)
		ppgtt->pd_dirty_rings |= unwind_mm;
err:
	return ret;
}
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!request->hw_context->pin_count);

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&ring->request_list));
	list_for_each_entry(target, &ring->request_list, ring_link) {
		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	GEM_BUG_ON(bytes > ring->effective_size);
	if (unlikely(bytes > ring->effective_size - ring->emit))
		bytes += ring->size - ring->emit;

	if (unlikely(bytes > ring->space)) {
		int ret = wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring, total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}
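
/*
 * Worked example of the wrap path above (illustrative numbers): with
 * size = effective_size = 4096, emit = 4000, reserved_space = 0 and a
 * request for 30 dwords (120 bytes), total_bytes = 120 exceeds
 * remain_usable = 96, so need_wrap = remain_actual | 1 = 97 (the low bit
 * merely flags that a wrap is pending). Once that bit is cleared, the
 * final 96 bytes of the ring are filled with MI_NOOP (zeros) and emission
 * restarts at offset 0.
 */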
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}
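
/*
 * Worked example (illustrative numbers): with CACHELINE_BYTES = 64
 * (CACHELINE_DWORDS = 16) and the tail 24 bytes (6 dwords) into a
 * cacheline, num_dwords = 16 - 6 = 10, so five qword-sized MI_NOOP pairs
 * are written and the tail advances the remaining 40 bytes to the next
 * 64-byte boundary.
 */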
static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(dev_priv,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
		      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	cmd |= flags;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}
static int
hsw_emit_bb_start(struct i915_request *rq,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}
/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
				       struct intel_engine_cs *engine)
{
	int i;

	if (!HAS_LEGACY_SEMAPHORES(dev_priv))
		return;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
	engine->semaphore.sync_to = gen6_ring_sync_to;
	engine->semaphore.signal = gen6_signal;

	/*
	 * The current semaphore is only applied on pre-gen8
	 * platform. And there is no VCS2 ring on the pre-gen8
	 * platform. So the semaphore between RCS and VCS2 is
	 * initialized as INVALID.
	 */
	for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
		static const struct {
			u32 wait_mbox;
			i915_reg_t mbox_reg;
		} sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
			[RCS_HW] = {
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
			},
			[VCS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
			},
			[BCS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
				[VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
			},
			[VECS_HW] = {
				[RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
				[VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
				[BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
			},
		};
		u32 wait_mbox;
		i915_reg_t mbox_reg;

		if (i == engine->hw_id) {
			wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
			mbox_reg = GEN6_NOSYNC;
		} else {
			wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
			mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
		}

		engine->semaphore.mbox.wait[i] = wait_mbox;
		engine->semaphore.mbox.signal[i] = mbox_reg;
	}
}
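
/*
 * Reading the table above (an example, no new behaviour): for a request on
 * VCS waiting on one from RCS, gen6_ring_sync_to() looks up the signaller's
 * row for the waiter, sem_data[RCS_HW][VCS_HW].wait_mbox =
 * MI_SEMAPHORE_SYNC_RV, while gen6_signal() running on RCS writes its seqno
 * into sem_data[RCS_HW][VCS_HW].mbox_reg = GEN6_VRSYNC, the register that
 * semaphore polls.
 */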
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
		engine->irq_seqno_barrier = gen6_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
		engine->irq_seqno_barrier = gen5_seqno_barrier;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);

	intel_ring_init_irq(dev_priv, engine);
	intel_ring_init_semaphores(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset.prepare = reset_prepare;
	engine->reset.reset = reset_ring;
	engine->reset.finish = reset_finish;

	engine->context_pin = intel_ring_context_pin;
	engine->request_alloc = ring_request_alloc;

	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
	engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		int num_rings;

		engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;

		num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
		engine->emit_breadcrumb_sz += num_rings * 3;
		if (num_rings & 1)
			engine->emit_breadcrumb_sz++;
	}

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	return 0;
}
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}

int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	return intel_init_ring_buffer(engine);
}