/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */
#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static unsigned int __intel_ring_space(unsigned int head,
				       unsigned int tail,
				       unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
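/*
 * Gen2/3 have no PIPE_CONTROL; cache flushing is a single MI_FLUSH command,
 * with invalidation requested by OR-ing extra bits into that same command.
 */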
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that it is a delay following invalidation
	 * that is required to reset the caches as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
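/*
 * Full gen6 flush: apply the post-sync-nonzero workaround above, then emit a
 * single PIPE_CONTROL whose flush/invalidate bits are picked from EMIT_*.
 */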
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
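/*
 * Platforms flagged HWS_NEEDS_PHYSICAL() take a physical address for the
 * hardware status page via HWS_PGA; on gen4+ the upper bits of that address
 * are folded into the low nibble of the register.
 */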
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct page *page = virt_to_page(engine->status_page.page_addr);
	phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (phys >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}
static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev_priv)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(dev_priv)) {
		mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		mmio = RING_HWS_PGA(engine->mmio_base);
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		u32 mask = ~0u;

		/*
		 * Keep the render interrupt unmasked as this papers over
		 * lost interrupts following a reset.
		 */
		if (engine->id == RCS)
			mask &= ~BIT(0);

		I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
	}

	I915_WRITE(mmio, engine->status_page.ggtt_offset);
	POSTING_READ(mmio);

	/* Flush the TLB for this page */
	if (IS_GEN(dev_priv, 6, 7)) {
		i915_reg_t reg = RING_INSTPM(engine->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (intel_wait_for_register(dev_priv,
					    reg, INSTPM_SYNC_FLUSH, 0,
					    1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  engine->name);
	}
}
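/*
 * stop_ring() asks the command streamer to stop (STOP_RING in MI_MODE),
 * waits for it to report idle and then clears HEAD/TAIL/CTL so the ring
 * can be reprogrammed from a known state.
 */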
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(dev_priv,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));

	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_CTL(engine, 0);

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				 "ctl %08x head %08x tail %08x start %08x\n",
				 engine->name,
				 I915_READ_CTL(engine),
				 I915_READ_HEAD(engine),
				 I915_READ_TAIL(engine),
				 I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		intel_ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
				 engine->name, I915_READ_HEAD(engine));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));

	intel_ring_update_space(ring);
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->tail);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_wakeup(engine);
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}
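/*
 * reset_prepare() quiesces the command streamer and returns the request
 * that was executing, if any, so the reset path can decide whether to
 * replay it or skip it.
 */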
static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
	intel_engine_stop_cs(engine);

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	return i915_gem_find_active_request(engine);
}
static void skip_request(struct i915_request *rq)
{
	void *vaddr = rq->ring->vaddr;
	u32 head = rq->head;

	if (rq->postfix < head) {
		memset32(vaddr + head, MI_NOOP,
			 (rq->ring->size - head) / sizeof(u32));
		head = 0;
	}
	memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
}
static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
	GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);

	/*
	 * Try to restore the logical GPU state to match the continuation
	 * of the request queue. If we skip the context/PD restore, then
	 * the next request may try to execute assuming that its context
	 * is valid and loaded on the GPU and so may try to access invalid
	 * memory, prompting repeated GPU hangs.
	 *
	 * If the request was guilty, we still restore the logical state
	 * in case the next request requires it (e.g. the aliasing ppgtt),
	 * but skip over the hung batch.
	 *
	 * If the request was innocent, we try to replay the request with
	 * the restored context.
	 */
	if (rq) {
		/* If the rq hung, jump to its breadcrumb and skip the batch */
		rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
		if (rq->fence.error == -EIO)
			skip_request(rq);
	}
}

static void reset_finish(struct intel_engine_cs *engine)
{
}
static int intel_rcs_ctx_init(struct i915_request *rq)
{
	int ret;

	ret = intel_ctx_workarounds_emit(rq);
	if (ret)
		return ret;

	ret = i915_gem_render_state_emit(rq);
	if (ret)
		return ret;

	return 0;
}
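/* Render-engine hardware init: common ring setup plus per-generation W/As. */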
static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	intel_whitelist_workarounds_apply(engine);

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN6(dev_priv))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev_priv))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev_priv)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_GEN(dev_priv) >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return 0;
}
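/*
 * gen6_signal() advertises the new global seqno to every other engine by
 * writing it into their semaphore mailbox registers with MI_LOAD_REGISTER_IMM,
 * releasing any MI_SEMAPHORE_MBOX waiters.
 */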
702 static u32
*gen6_signal(struct i915_request
*rq
, u32
*cs
)
704 struct drm_i915_private
*dev_priv
= rq
->i915
;
705 struct intel_engine_cs
*engine
;
706 enum intel_engine_id id
;
709 for_each_engine(engine
, dev_priv
, id
) {
712 if (!(BIT(engine
->hw_id
) & GEN6_SEMAPHORES_MASK
))
715 mbox_reg
= rq
->engine
->semaphore
.mbox
.signal
[engine
->hw_id
];
716 if (i915_mmio_reg_valid(mbox_reg
)) {
717 *cs
++ = MI_LOAD_REGISTER_IMM(1);
718 *cs
++ = i915_mmio_reg_offset(mbox_reg
);
719 *cs
++ = rq
->global_seqno
;
729 static void cancel_requests(struct intel_engine_cs
*engine
)
731 struct i915_request
*request
;
734 spin_lock_irqsave(&engine
->timeline
.lock
, flags
);
736 /* Mark all submitted requests as skipped. */
737 list_for_each_entry(request
, &engine
->timeline
.requests
, link
) {
738 GEM_BUG_ON(!request
->global_seqno
);
739 if (!i915_request_completed(request
))
740 dma_fence_set_error(&request
->fence
, -EIO
);
742 /* Remaining _unready_ requests will be nop'ed when submitted */
744 spin_unlock_irqrestore(&engine
->timeline
.lock
, flags
);
static void i9xx_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_request_submit(request);

	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));
}
static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
	*cs++ = rq->global_seqno;
	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);
}

static const int i9xx_emit_breadcrumb_sz = 4;
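/* With legacy semaphores, signal the other engines before the usual
 * seqno store + user interrupt. */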
static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
}
776 gen6_ring_sync_to(struct i915_request
*rq
, struct i915_request
*signal
)
778 u32 dw1
= MI_SEMAPHORE_MBOX
|
779 MI_SEMAPHORE_COMPARE
|
780 MI_SEMAPHORE_REGISTER
;
781 u32 wait_mbox
= signal
->engine
->semaphore
.mbox
.wait
[rq
->engine
->hw_id
];
784 WARN_ON(wait_mbox
== MI_SEMAPHORE_SYNC_INVALID
);
786 cs
= intel_ring_begin(rq
, 4);
790 *cs
++ = dw1
| wait_mbox
;
791 /* Throughout all of the GEM code, seqno passed implies our current
792 * seqno is >= the last seqno executed. However for hardware the
793 * comparison is strictly greater than.
795 *cs
++ = signal
->global_seqno
- 1;
798 intel_ring_advance(rq
, cs
);
804 gen5_seqno_barrier(struct intel_engine_cs
*engine
)
806 /* MI_STORE are internally buffered by the GPU and not flushed
807 * either by MI_FLUSH or SyncFlush or any other combination of
810 * "Only the submission of the store operation is guaranteed.
811 * The write result will be complete (coherent) some time later
812 * (this is practically a finite period but there is no guaranteed
815 * Empirically, we observe that we need a delay of at least 75us to
816 * be sure that the seqno write is visible by the CPU.
818 usleep_range(125, 250);
822 gen6_seqno_barrier(struct intel_engine_cs
*engine
)
824 struct drm_i915_private
*dev_priv
= engine
->i915
;
826 /* Workaround to force correct ordering between irq and seqno writes on
827 * ivb (and maybe also on snb) by reading from a CS register (like
828 * ACTHD) before reading the status page.
830 * Note that this effectively stalls the read by the time it takes to
831 * do a memory transaction, which more or less ensures that the write
832 * from the GPU has sufficient time to invalidate the CPU cacheline.
833 * Alternatively we could delay the interrupt from the CS ring to give
834 * the write time to land, but that would incur a delay after every
835 * batch i.e. much more frequent than a delay when waiting for the
836 * interrupt (with the same net latency).
838 * Also note that to prevent whole machine hangs on gen7, we have to
839 * take the spinlock to guard against concurrent cacheline access.
841 spin_lock_irq(&dev_priv
->uncore
.lock
);
842 POSTING_READ_FW(RING_ACTHD(engine
->mmio_base
));
843 spin_unlock_irq(&dev_priv
->uncore
.lock
);
847 gen5_irq_enable(struct intel_engine_cs
*engine
)
849 gen5_enable_gt_irq(engine
->i915
, engine
->irq_enable_mask
);
853 gen5_irq_disable(struct intel_engine_cs
*engine
)
855 gen5_disable_gt_irq(engine
->i915
, engine
->irq_enable_mask
);
859 i9xx_irq_enable(struct intel_engine_cs
*engine
)
861 struct drm_i915_private
*dev_priv
= engine
->i915
;
863 dev_priv
->irq_mask
&= ~engine
->irq_enable_mask
;
864 I915_WRITE(IMR
, dev_priv
->irq_mask
);
865 POSTING_READ_FW(RING_IMR(engine
->mmio_base
));
869 i9xx_irq_disable(struct intel_engine_cs
*engine
)
871 struct drm_i915_private
*dev_priv
= engine
->i915
;
873 dev_priv
->irq_mask
|= engine
->irq_enable_mask
;
874 I915_WRITE(IMR
, dev_priv
->irq_mask
);
878 i8xx_irq_enable(struct intel_engine_cs
*engine
)
880 struct drm_i915_private
*dev_priv
= engine
->i915
;
882 dev_priv
->irq_mask
&= ~engine
->irq_enable_mask
;
883 I915_WRITE16(IMR
, dev_priv
->irq_mask
);
884 POSTING_READ16(RING_IMR(engine
->mmio_base
));
888 i8xx_irq_disable(struct intel_engine_cs
*engine
)
890 struct drm_i915_private
*dev_priv
= engine
->i915
;
892 dev_priv
->irq_mask
|= engine
->irq_enable_mask
;
893 I915_WRITE16(IMR
, dev_priv
->irq_mask
);
static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}
912 gen6_irq_enable(struct intel_engine_cs
*engine
)
914 struct drm_i915_private
*dev_priv
= engine
->i915
;
916 I915_WRITE_IMR(engine
,
917 ~(engine
->irq_enable_mask
|
918 engine
->irq_keep_mask
));
919 gen5_enable_gt_irq(dev_priv
, engine
->irq_enable_mask
);
923 gen6_irq_disable(struct intel_engine_cs
*engine
)
925 struct drm_i915_private
*dev_priv
= engine
->i915
;
927 I915_WRITE_IMR(engine
, ~engine
->irq_keep_mask
);
928 gen5_disable_gt_irq(dev_priv
, engine
->irq_enable_mask
);
932 hsw_vebox_irq_enable(struct intel_engine_cs
*engine
)
934 struct drm_i915_private
*dev_priv
= engine
->i915
;
936 I915_WRITE_IMR(engine
, ~engine
->irq_enable_mask
);
937 gen6_unmask_pm_irq(dev_priv
, engine
->irq_enable_mask
);
941 hsw_vebox_irq_disable(struct intel_engine_cs
*engine
)
943 struct drm_i915_private
*dev_priv
= engine
->i915
;
945 I915_WRITE_IMR(engine
, ~0);
946 gen6_mask_pm_irq(dev_priv
, engine
->irq_enable_mask
);
950 i965_emit_bb_start(struct i915_request
*rq
,
951 u64 offset
, u32 length
,
952 unsigned int dispatch_flags
)
956 cs
= intel_ring_begin(rq
, 2);
960 *cs
++ = MI_BATCH_BUFFER_START
| MI_BATCH_GTT
| (dispatch_flags
&
961 I915_DISPATCH_SECURE
? 0 : MI_BATCH_NON_SECURE_I965
);
963 intel_ring_advance(rq
, cs
);
968 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */
969 #define I830_BATCH_LIMIT SZ_256K
970 #define I830_TLB_ENTRIES (2)
971 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
973 i830_emit_bb_start(struct i915_request
*rq
,
975 unsigned int dispatch_flags
)
977 u32
*cs
, cs_offset
= i915_scratch_offset(rq
->i915
);
979 GEM_BUG_ON(rq
->i915
->gt
.scratch
->size
< I830_WA_SIZE
);
981 cs
= intel_ring_begin(rq
, 6);
985 /* Evict the invalid PTE TLBs */
986 *cs
++ = COLOR_BLT_CMD
| BLT_WRITE_RGBA
;
987 *cs
++ = BLT_DEPTH_32
| BLT_ROP_COLOR_COPY
| 4096;
988 *cs
++ = I830_TLB_ENTRIES
<< 16 | 4; /* load each page */
992 intel_ring_advance(rq
, cs
);
994 if ((dispatch_flags
& I915_DISPATCH_PINNED
) == 0) {
995 if (len
> I830_BATCH_LIMIT
)
998 cs
= intel_ring_begin(rq
, 6 + 2);
1002 /* Blit the batch (which has now all relocs applied) to the
1003 * stable batch scratch bo area (so that the CS never
1004 * stumbles over its tlb invalidation bug) ...
1006 *cs
++ = SRC_COPY_BLT_CMD
| BLT_WRITE_RGBA
;
1007 *cs
++ = BLT_DEPTH_32
| BLT_ROP_SRC_COPY
| 4096;
1008 *cs
++ = DIV_ROUND_UP(len
, 4096) << 16 | 4096;
1015 intel_ring_advance(rq
, cs
);
1017 /* ... and execute it. */
1021 cs
= intel_ring_begin(rq
, 2);
1025 *cs
++ = MI_BATCH_BUFFER_START
| MI_BATCH_GTT
;
1026 *cs
++ = offset
| (dispatch_flags
& I915_DISPATCH_SECURE
? 0 :
1027 MI_BATCH_NON_SECURE
);
1028 intel_ring_advance(rq
, cs
);
1034 i915_emit_bb_start(struct i915_request
*rq
,
1035 u64 offset
, u32 len
,
1036 unsigned int dispatch_flags
)
1040 cs
= intel_ring_begin(rq
, 2);
1044 *cs
++ = MI_BATCH_BUFFER_START
| MI_BATCH_GTT
;
1045 *cs
++ = offset
| (dispatch_flags
& I915_DISPATCH_SECURE
? 0 :
1046 MI_BATCH_NON_SECURE
);
1047 intel_ring_advance(rq
, cs
);
1052 int intel_ring_pin(struct intel_ring
*ring
)
1054 struct i915_vma
*vma
= ring
->vma
;
1055 enum i915_map_type map
=
1056 HAS_LLC(vma
->vm
->i915
) ? I915_MAP_WB
: I915_MAP_WC
;
1061 GEM_BUG_ON(ring
->vaddr
);
1065 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1066 flags
|= PIN_OFFSET_BIAS
| i915_ggtt_pin_bias(vma
);
1068 if (vma
->obj
->stolen
)
1069 flags
|= PIN_MAPPABLE
;
1073 if (!(vma
->flags
& I915_VMA_GLOBAL_BIND
)) {
1074 if (flags
& PIN_MAPPABLE
|| map
== I915_MAP_WC
)
1075 ret
= i915_gem_object_set_to_gtt_domain(vma
->obj
, true);
1077 ret
= i915_gem_object_set_to_cpu_domain(vma
->obj
, true);
1082 ret
= i915_vma_pin(vma
, 0, 0, flags
);
1086 if (i915_vma_is_map_and_fenceable(vma
))
1087 addr
= (void __force
*)i915_vma_pin_iomap(vma
);
1089 addr
= i915_gem_object_pin_map(vma
->obj
, map
);
1093 vma
->obj
->pin_global
++;
1099 i915_vma_unpin(vma
);
1100 return PTR_ERR(addr
);
1103 void intel_ring_reset(struct intel_ring
*ring
, u32 tail
)
1105 GEM_BUG_ON(!intel_ring_offset_valid(ring
, tail
));
1110 intel_ring_update_space(ring
);
1113 void intel_ring_unpin(struct intel_ring
*ring
)
1115 GEM_BUG_ON(!ring
->vma
);
1116 GEM_BUG_ON(!ring
->vaddr
);
1118 /* Discard any unused bytes beyond that submitted to hw. */
1119 intel_ring_reset(ring
, ring
->tail
);
1121 if (i915_vma_is_map_and_fenceable(ring
->vma
))
1122 i915_vma_unpin_iomap(ring
->vma
);
1124 i915_gem_object_unpin_map(ring
->vma
->obj
);
1127 ring
->vma
->obj
->pin_global
--;
1128 i915_vma_unpin(ring
->vma
);
1131 static struct i915_vma
*
1132 intel_ring_create_vma(struct drm_i915_private
*dev_priv
, int size
)
1134 struct i915_address_space
*vm
= &dev_priv
->ggtt
.vm
;
1135 struct drm_i915_gem_object
*obj
;
1136 struct i915_vma
*vma
;
1138 obj
= i915_gem_object_create_stolen(dev_priv
, size
);
1140 obj
= i915_gem_object_create_internal(dev_priv
, size
);
1142 return ERR_CAST(obj
);
1145 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
1146 * if supported by the platform's GGTT.
1148 if (vm
->has_read_only
)
1149 i915_gem_object_set_readonly(obj
);
1151 vma
= i915_vma_instance(obj
, vm
, NULL
);
1158 i915_gem_object_put(obj
);
1163 intel_engine_create_ring(struct intel_engine_cs
*engine
,
1164 struct i915_timeline
*timeline
,
1167 struct intel_ring
*ring
;
1168 struct i915_vma
*vma
;
1170 GEM_BUG_ON(!is_power_of_2(size
));
1171 GEM_BUG_ON(RING_CTL_SIZE(size
) & ~RING_NR_PAGES
);
1172 GEM_BUG_ON(timeline
== &engine
->timeline
);
1173 lockdep_assert_held(&engine
->i915
->drm
.struct_mutex
);
1175 ring
= kzalloc(sizeof(*ring
), GFP_KERNEL
);
1177 return ERR_PTR(-ENOMEM
);
1179 INIT_LIST_HEAD(&ring
->request_list
);
1180 ring
->timeline
= i915_timeline_get(timeline
);
1183 /* Workaround an erratum on the i830 which causes a hang if
1184 * the TAIL pointer points to within the last 2 cachelines
1187 ring
->effective_size
= size
;
1188 if (IS_I830(engine
->i915
) || IS_I845G(engine
->i915
))
1189 ring
->effective_size
-= 2 * CACHELINE_BYTES
;
1191 intel_ring_update_space(ring
);
1193 vma
= intel_ring_create_vma(engine
->i915
, size
);
1196 return ERR_CAST(vma
);
1204 intel_ring_free(struct intel_ring
*ring
)
1206 struct drm_i915_gem_object
*obj
= ring
->vma
->obj
;
1208 i915_vma_close(ring
->vma
);
1209 __i915_gem_object_release_unless_active(obj
);
1211 i915_timeline_put(ring
->timeline
);
1215 static void intel_ring_context_destroy(struct intel_context
*ce
)
1217 GEM_BUG_ON(ce
->pin_count
);
1222 GEM_BUG_ON(i915_gem_object_is_active(ce
->state
->obj
));
1223 i915_gem_object_put(ce
->state
->obj
);
1226 static int __context_pin_ppgtt(struct i915_gem_context
*ctx
)
1228 struct i915_hw_ppgtt
*ppgtt
;
1231 ppgtt
= ctx
->ppgtt
?: ctx
->i915
->mm
.aliasing_ppgtt
;
1233 err
= gen6_ppgtt_pin(ppgtt
);
1238 static void __context_unpin_ppgtt(struct i915_gem_context
*ctx
)
1240 struct i915_hw_ppgtt
*ppgtt
;
1242 ppgtt
= ctx
->ppgtt
?: ctx
->i915
->mm
.aliasing_ppgtt
;
1244 gen6_ppgtt_unpin(ppgtt
);
1247 static int __context_pin(struct intel_context
*ce
)
1249 struct i915_vma
*vma
;
1257 * Clear this page out of any CPU caches for coherent swap-in/out.
1258 * We only want to do this on the first bind so that we do not stall
1259 * on an active context (which by nature is already on the GPU).
1261 if (!(vma
->flags
& I915_VMA_GLOBAL_BIND
)) {
1262 err
= i915_gem_object_set_to_gtt_domain(vma
->obj
, true);
1267 err
= i915_vma_pin(vma
, 0, 0, PIN_GLOBAL
| PIN_HIGH
);
1272 * And mark is as a globally pinned object to let the shrinker know
1273 * it cannot reclaim the object until we release it.
1275 vma
->obj
->pin_global
++;
1280 static void __context_unpin(struct intel_context
*ce
)
1282 struct i915_vma
*vma
;
1288 vma
->obj
->pin_global
--;
1289 i915_vma_unpin(vma
);
1292 static void intel_ring_context_unpin(struct intel_context
*ce
)
1294 __context_unpin_ppgtt(ce
->gem_context
);
1295 __context_unpin(ce
);
1297 i915_gem_context_put(ce
->gem_context
);
1300 static struct i915_vma
*
1301 alloc_context_vma(struct intel_engine_cs
*engine
)
1303 struct drm_i915_private
*i915
= engine
->i915
;
1304 struct drm_i915_gem_object
*obj
;
1305 struct i915_vma
*vma
;
1308 obj
= i915_gem_object_create(i915
, engine
->context_size
);
1310 return ERR_CAST(obj
);
1312 if (engine
->default_state
) {
1313 void *defaults
, *vaddr
;
1315 vaddr
= i915_gem_object_pin_map(obj
, I915_MAP_WB
);
1316 if (IS_ERR(vaddr
)) {
1317 err
= PTR_ERR(vaddr
);
1321 defaults
= i915_gem_object_pin_map(engine
->default_state
,
1323 if (IS_ERR(defaults
)) {
1324 err
= PTR_ERR(defaults
);
1328 memcpy(vaddr
, defaults
, engine
->context_size
);
1330 i915_gem_object_unpin_map(engine
->default_state
);
1331 i915_gem_object_unpin_map(obj
);
1335 * Try to make the context utilize L3 as well as LLC.
1337 * On VLV we don't have L3 controls in the PTEs so we
1338 * shouldn't touch the cache level, especially as that
1339 * would make the object snooped which might have a
1340 * negative performance impact.
1342 * Snooping is required on non-llc platforms in execlist
1343 * mode, but since all GGTT accesses use PAT entry 0 we
1344 * get snooping anyway regardless of cache_level.
1346 * This is only applicable for Ivy Bridge devices since
1347 * later platforms don't have L3 control bits in the PTE.
1349 if (IS_IVYBRIDGE(i915
)) {
1350 /* Ignore any error, regard it as a simple optimisation */
1351 i915_gem_object_set_cache_level(obj
, I915_CACHE_L3_LLC
);
1354 vma
= i915_vma_instance(obj
, &i915
->ggtt
.vm
, NULL
);
1363 i915_gem_object_unpin_map(obj
);
1365 i915_gem_object_put(obj
);
1366 return ERR_PTR(err
);
1369 static struct intel_context
*
1370 __ring_context_pin(struct intel_engine_cs
*engine
,
1371 struct i915_gem_context
*ctx
,
1372 struct intel_context
*ce
)
1376 if (!ce
->state
&& engine
->context_size
) {
1377 struct i915_vma
*vma
;
1379 vma
= alloc_context_vma(engine
);
1388 err
= __context_pin(ce
);
1392 err
= __context_pin_ppgtt(ce
->gem_context
);
1396 i915_gem_context_get(ctx
);
1398 /* One ringbuffer to rule them all */
1399 GEM_BUG_ON(!engine
->buffer
);
1400 ce
->ring
= engine
->buffer
;
1405 __context_unpin(ce
);
1408 return ERR_PTR(err
);
1411 static const struct intel_context_ops ring_context_ops
= {
1412 .unpin
= intel_ring_context_unpin
,
1413 .destroy
= intel_ring_context_destroy
,
1416 static struct intel_context
*
1417 intel_ring_context_pin(struct intel_engine_cs
*engine
,
1418 struct i915_gem_context
*ctx
)
1420 struct intel_context
*ce
= to_intel_context(ctx
, engine
);
1422 lockdep_assert_held(&ctx
->i915
->drm
.struct_mutex
);
1424 if (likely(ce
->pin_count
++))
1426 GEM_BUG_ON(!ce
->pin_count
); /* no overflow please! */
1428 ce
->ops
= &ring_context_ops
;
1430 return __ring_context_pin(engine
, ctx
, ce
);
1433 static int intel_init_ring_buffer(struct intel_engine_cs
*engine
)
1435 struct i915_timeline
*timeline
;
1436 struct intel_ring
*ring
;
1439 intel_engine_setup_common(engine
);
1441 timeline
= i915_timeline_create(engine
->i915
, engine
->name
);
1442 if (IS_ERR(timeline
)) {
1443 err
= PTR_ERR(timeline
);
1447 ring
= intel_engine_create_ring(engine
, timeline
, 32 * PAGE_SIZE
);
1448 i915_timeline_put(timeline
);
1450 err
= PTR_ERR(ring
);
1454 err
= intel_ring_pin(ring
);
1458 GEM_BUG_ON(engine
->buffer
);
1459 engine
->buffer
= ring
;
1461 err
= intel_engine_init_common(engine
);
1468 intel_ring_unpin(ring
);
1470 intel_ring_free(ring
);
1472 intel_engine_cleanup_common(engine
);
1476 void intel_engine_cleanup(struct intel_engine_cs
*engine
)
1478 struct drm_i915_private
*dev_priv
= engine
->i915
;
1480 WARN_ON(INTEL_GEN(dev_priv
) > 2 &&
1481 (I915_READ_MODE(engine
) & MODE_IDLE
) == 0);
1483 intel_ring_unpin(engine
->buffer
);
1484 intel_ring_free(engine
->buffer
);
1486 if (engine
->cleanup
)
1487 engine
->cleanup(engine
);
1489 intel_engine_cleanup_common(engine
);
1491 dev_priv
->engine
[engine
->id
] = NULL
;
1495 void intel_legacy_submission_resume(struct drm_i915_private
*dev_priv
)
1497 struct intel_engine_cs
*engine
;
1498 enum intel_engine_id id
;
1500 /* Restart from the beginning of the rings for convenience */
1501 for_each_engine(engine
, dev_priv
, id
)
1502 intel_ring_reset(engine
->buffer
, 0);
1505 static int load_pd_dir(struct i915_request
*rq
,
1506 const struct i915_hw_ppgtt
*ppgtt
)
1508 const struct intel_engine_cs
* const engine
= rq
->engine
;
1511 cs
= intel_ring_begin(rq
, 6);
1515 *cs
++ = MI_LOAD_REGISTER_IMM(1);
1516 *cs
++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine
));
1517 *cs
++ = PP_DIR_DCLV_2G
;
1519 *cs
++ = MI_LOAD_REGISTER_IMM(1);
1520 *cs
++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine
));
1521 *cs
++ = ppgtt
->pd
.base
.ggtt_offset
<< 10;
1523 intel_ring_advance(rq
, cs
);
1528 static int flush_pd_dir(struct i915_request
*rq
)
1530 const struct intel_engine_cs
* const engine
= rq
->engine
;
1533 cs
= intel_ring_begin(rq
, 4);
1537 /* Stall until the page table load is complete */
1538 *cs
++ = MI_STORE_REGISTER_MEM
| MI_SRM_LRM_GLOBAL_GTT
;
1539 *cs
++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine
));
1540 *cs
++ = i915_scratch_offset(rq
->i915
);
1543 intel_ring_advance(rq
, cs
);
1547 static inline int mi_set_context(struct i915_request
*rq
, u32 flags
)
1549 struct drm_i915_private
*i915
= rq
->i915
;
1550 struct intel_engine_cs
*engine
= rq
->engine
;
1551 enum intel_engine_id id
;
1552 const int num_rings
=
1553 /* Use an extended w/a on gen7 if signalling from other rings */
1554 (HAS_LEGACY_SEMAPHORES(i915
) && IS_GEN7(i915
)) ?
1555 INTEL_INFO(i915
)->num_rings
- 1 :
1557 bool force_restore
= false;
1561 flags
|= MI_MM_SPACE_GTT
;
1562 if (IS_HASWELL(i915
))
1563 /* These flags are for resource streamer on HSW+ */
1564 flags
|= HSW_MI_RS_SAVE_STATE_EN
| HSW_MI_RS_RESTORE_STATE_EN
;
1566 flags
|= MI_SAVE_EXT_STATE_EN
| MI_RESTORE_EXT_STATE_EN
;
1570 len
+= 2 + (num_rings
? 4*num_rings
+ 6 : 0);
1571 if (flags
& MI_FORCE_RESTORE
) {
1572 GEM_BUG_ON(flags
& MI_RESTORE_INHIBIT
);
1573 flags
&= ~MI_FORCE_RESTORE
;
1574 force_restore
= true;
1578 cs
= intel_ring_begin(rq
, len
);
1582 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1583 if (IS_GEN7(i915
)) {
1584 *cs
++ = MI_ARB_ON_OFF
| MI_ARB_DISABLE
;
1586 struct intel_engine_cs
*signaller
;
1588 *cs
++ = MI_LOAD_REGISTER_IMM(num_rings
);
1589 for_each_engine(signaller
, i915
, id
) {
1590 if (signaller
== engine
)
1593 *cs
++ = i915_mmio_reg_offset(
1594 RING_PSMI_CTL(signaller
->mmio_base
));
1595 *cs
++ = _MASKED_BIT_ENABLE(
1596 GEN6_PSMI_SLEEP_MSG_DISABLE
);
1601 if (force_restore
) {
1603 * The HW doesn't handle being told to restore the current
1604 * context very well. Quite often it likes goes to go off and
1605 * sulk, especially when it is meant to be reloading PP_DIR.
1606 * A very simple fix to force the reload is to simply switch
1607 * away from the current context and back again.
1609 * Note that the kernel_context will contain random state
1610 * following the INHIBIT_RESTORE. We accept this since we
1611 * never use the kernel_context state; it is merely a
1612 * placeholder we use to flush other contexts.
1614 *cs
++ = MI_SET_CONTEXT
;
1615 *cs
++ = i915_ggtt_offset(to_intel_context(i915
->kernel_context
,
1622 *cs
++ = MI_SET_CONTEXT
;
1623 *cs
++ = i915_ggtt_offset(rq
->hw_context
->state
) | flags
;
1625 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1626 * WaMiSetContext_Hang:snb,ivb,vlv
1630 if (IS_GEN7(i915
)) {
1632 struct intel_engine_cs
*signaller
;
1633 i915_reg_t last_reg
= {}; /* keep gcc quiet */
1635 *cs
++ = MI_LOAD_REGISTER_IMM(num_rings
);
1636 for_each_engine(signaller
, i915
, id
) {
1637 if (signaller
== engine
)
1640 last_reg
= RING_PSMI_CTL(signaller
->mmio_base
);
1641 *cs
++ = i915_mmio_reg_offset(last_reg
);
1642 *cs
++ = _MASKED_BIT_DISABLE(
1643 GEN6_PSMI_SLEEP_MSG_DISABLE
);
1646 /* Insert a delay before the next switch! */
1647 *cs
++ = MI_STORE_REGISTER_MEM
| MI_SRM_LRM_GLOBAL_GTT
;
1648 *cs
++ = i915_mmio_reg_offset(last_reg
);
1649 *cs
++ = i915_scratch_offset(rq
->i915
);
1652 *cs
++ = MI_ARB_ON_OFF
| MI_ARB_ENABLE
;
1655 intel_ring_advance(rq
, cs
);
1660 static int remap_l3(struct i915_request
*rq
, int slice
)
1662 u32
*cs
, *remap_info
= rq
->i915
->l3_parity
.remap_info
[slice
];
1668 cs
= intel_ring_begin(rq
, GEN7_L3LOG_SIZE
/4 * 2 + 2);
1673 * Note: We do not worry about the concurrent register cacheline hang
1674 * here because no other code should access these registers other than
1675 * at initialization time.
1677 *cs
++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE
/4);
1678 for (i
= 0; i
< GEN7_L3LOG_SIZE
/4; i
++) {
1679 *cs
++ = i915_mmio_reg_offset(GEN7_L3LOG(slice
, i
));
1680 *cs
++ = remap_info
[i
];
1683 intel_ring_advance(rq
, cs
);
1688 static int switch_context(struct i915_request
*rq
)
1690 struct intel_engine_cs
*engine
= rq
->engine
;
1691 struct i915_gem_context
*ctx
= rq
->gem_context
;
1692 struct i915_hw_ppgtt
*ppgtt
= ctx
->ppgtt
?: rq
->i915
->mm
.aliasing_ppgtt
;
1693 unsigned int unwind_mm
= 0;
1697 lockdep_assert_held(&rq
->i915
->drm
.struct_mutex
);
1698 GEM_BUG_ON(HAS_EXECLISTS(rq
->i915
));
1704 * Baytail takes a little more convincing that it really needs
1705 * to reload the PD between contexts. It is not just a little
1706 * longer, as adding more stalls after the load_pd_dir (i.e.
1707 * adding a long loop around flush_pd_dir) is not as effective
1708 * as reloading the PD umpteen times. 32 is derived from
1709 * experimentation (gem_exec_parallel/fds) and has no good
1713 if (engine
->id
== BCS
&& IS_VALLEYVIEW(engine
->i915
))
1717 ret
= load_pd_dir(rq
, ppgtt
);
1722 if (intel_engine_flag(engine
) & ppgtt
->pd_dirty_rings
) {
1723 unwind_mm
= intel_engine_flag(engine
);
1724 ppgtt
->pd_dirty_rings
&= ~unwind_mm
;
1725 hw_flags
= MI_FORCE_RESTORE
;
1729 if (rq
->hw_context
->state
) {
1730 GEM_BUG_ON(engine
->id
!= RCS
);
1733 * The kernel context(s) is treated as pure scratch and is not
1734 * expected to retain any state (as we sacrifice it during
1735 * suspend and on resume it may be corrupted). This is ok,
1736 * as nothing actually executes using the kernel context; it
1737 * is purely used for flushing user contexts.
1739 if (i915_gem_context_is_kernel(ctx
))
1740 hw_flags
= MI_RESTORE_INHIBIT
;
1742 ret
= mi_set_context(rq
, hw_flags
);
1748 ret
= engine
->emit_flush(rq
, EMIT_INVALIDATE
);
1752 ret
= flush_pd_dir(rq
);
1757 * Not only do we need a full barrier (post-sync write) after
1758 * invalidating the TLBs, but we need to wait a little bit
1759 * longer. Whether this is merely delaying us, or the
1760 * subsequent flush is a key part of serialising with the
1761 * post-sync op, this extra pass appears vital before a
1764 ret
= engine
->emit_flush(rq
, EMIT_INVALIDATE
);
1768 ret
= engine
->emit_flush(rq
, EMIT_FLUSH
);
1773 if (ctx
->remap_slice
) {
1774 for (i
= 0; i
< MAX_L3_SLICES
; i
++) {
1775 if (!(ctx
->remap_slice
& BIT(i
)))
1778 ret
= remap_l3(rq
, i
);
1783 ctx
->remap_slice
= 0;
1790 ppgtt
->pd_dirty_rings
|= unwind_mm
;
1795 static int ring_request_alloc(struct i915_request
*request
)
1799 GEM_BUG_ON(!request
->hw_context
->pin_count
);
1801 /* Flush enough space to reduce the likelihood of waiting after
1802 * we start building the request - in which case we will just
1803 * have to repeat work.
1805 request
->reserved_space
+= LEGACY_REQUEST_SIZE
;
1807 ret
= intel_ring_wait_for_space(request
->ring
, request
->reserved_space
);
1811 ret
= switch_context(request
);
1815 request
->reserved_space
-= LEGACY_REQUEST_SIZE
;
1819 static noinline
int wait_for_space(struct intel_ring
*ring
, unsigned int bytes
)
1821 struct i915_request
*target
;
1824 lockdep_assert_held(&ring
->vma
->vm
->i915
->drm
.struct_mutex
);
1826 if (intel_ring_update_space(ring
) >= bytes
)
1829 GEM_BUG_ON(list_empty(&ring
->request_list
));
1830 list_for_each_entry(target
, &ring
->request_list
, ring_link
) {
1831 /* Would completion of this request free enough space? */
1832 if (bytes
<= __intel_ring_space(target
->postfix
,
1833 ring
->emit
, ring
->size
))
1837 if (WARN_ON(&target
->ring_link
== &ring
->request_list
))
1840 timeout
= i915_request_wait(target
,
1841 I915_WAIT_INTERRUPTIBLE
| I915_WAIT_LOCKED
,
1842 MAX_SCHEDULE_TIMEOUT
);
1846 i915_request_retire_upto(target
);
1848 intel_ring_update_space(ring
);
1849 GEM_BUG_ON(ring
->space
< bytes
);
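/*
 * intel_ring_wait_for_space() accounts for a possible wrap: if the request
 * cannot fit before the end of the ring, the tail will be padded up to the
 * end first, so that padding is added to the number of bytes waited for.
 */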
int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	GEM_BUG_ON(bytes > ring->effective_size);
	if (unlikely(bytes > ring->effective_size - ring->emit))
		bytes += ring->size - ring->emit;

	if (unlikely(bytes > ring->space)) {
		int ret = wait_for_space(ring, bytes);
		if (unlikely(ret))
			return ret;
	}

	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
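/*
 * intel_ring_begin() reserves space for num_dwords in the ring, waiting for
 * older requests to be retired if necessary and padding with MI_NOOP when
 * the allocation has to wrap past the end of the buffer.
 */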
1869 u32
*intel_ring_begin(struct i915_request
*rq
, unsigned int num_dwords
)
1871 struct intel_ring
*ring
= rq
->ring
;
1872 const unsigned int remain_usable
= ring
->effective_size
- ring
->emit
;
1873 const unsigned int bytes
= num_dwords
* sizeof(u32
);
1874 unsigned int need_wrap
= 0;
1875 unsigned int total_bytes
;
1878 /* Packets must be qword aligned. */
1879 GEM_BUG_ON(num_dwords
& 1);
1881 total_bytes
= bytes
+ rq
->reserved_space
;
1882 GEM_BUG_ON(total_bytes
> ring
->effective_size
);
1884 if (unlikely(total_bytes
> remain_usable
)) {
1885 const int remain_actual
= ring
->size
- ring
->emit
;
1887 if (bytes
> remain_usable
) {
1889 * Not enough space for the basic request. So need to
1890 * flush out the remainder and then wait for
1893 total_bytes
+= remain_actual
;
1894 need_wrap
= remain_actual
| 1;
1897 * The base request will fit but the reserved space
1898 * falls off the end. So we don't need an immediate
1899 * wrap and only need to effectively wait for the
1900 * reserved size from the start of ringbuffer.
1902 total_bytes
= rq
->reserved_space
+ remain_actual
;
1906 if (unlikely(total_bytes
> ring
->space
)) {
1910 * Space is reserved in the ringbuffer for finalising the
1911 * request, as that cannot be allowed to fail. During request
1912 * finalisation, reserved_space is set to 0 to stop the
1913 * overallocation and the assumption is that then we never need
1914 * to wait (which has the risk of failing with EINTR).
1916 * See also i915_request_alloc() and i915_request_add().
1918 GEM_BUG_ON(!rq
->reserved_space
);
1920 ret
= wait_for_space(ring
, total_bytes
);
1922 return ERR_PTR(ret
);
1925 if (unlikely(need_wrap
)) {
1927 GEM_BUG_ON(need_wrap
> ring
->space
);
1928 GEM_BUG_ON(ring
->emit
+ need_wrap
> ring
->size
);
1929 GEM_BUG_ON(!IS_ALIGNED(need_wrap
, sizeof(u64
)));
1931 /* Fill the tail with MI_NOOP */
1932 memset64(ring
->vaddr
+ ring
->emit
, 0, need_wrap
/ sizeof(u64
));
1933 ring
->space
-= need_wrap
;
1937 GEM_BUG_ON(ring
->emit
> ring
->size
- bytes
);
1938 GEM_BUG_ON(ring
->space
< bytes
);
1939 cs
= ring
->vaddr
+ ring
->emit
;
1940 GEM_DEBUG_EXEC(memset32(cs
, POISON_INUSE
, bytes
/ sizeof(*cs
)));
1941 ring
->emit
+= bytes
;
1942 ring
->space
-= bytes
;
1947 /* Align the ring tail to a cacheline boundary */
1948 int intel_ring_cacheline_align(struct i915_request
*rq
)
1953 num_dwords
= (rq
->ring
->emit
& (CACHELINE_BYTES
- 1)) / sizeof(u32
);
1954 if (num_dwords
== 0)
1957 num_dwords
= CACHELINE_DWORDS
- num_dwords
;
1958 GEM_BUG_ON(num_dwords
& 1);
1960 cs
= intel_ring_begin(rq
, num_dwords
);
1964 memset64(cs
, (u64
)MI_NOOP
<< 32 | MI_NOOP
, num_dwords
/ 2);
1965 intel_ring_advance(rq
, cs
);
1967 GEM_BUG_ON(rq
->ring
->emit
& (CACHELINE_BYTES
- 1));
1971 static void gen6_bsd_submit_request(struct i915_request
*request
)
1973 struct drm_i915_private
*dev_priv
= request
->i915
;
1975 intel_uncore_forcewake_get(dev_priv
, FORCEWAKE_ALL
);
1977 /* Every tail move must follow the sequence below */
1979 /* Disable notification that the ring is IDLE. The GT
1980 * will then assume that it is busy and bring it out of rc6.
1982 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL
,
1983 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE
));
1985 /* Clear the context id. Here be magic! */
1986 I915_WRITE64_FW(GEN6_BSD_RNCID
, 0x0);
1988 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1989 if (__intel_wait_for_register_fw(dev_priv
,
1990 GEN6_BSD_SLEEP_PSMI_CONTROL
,
1991 GEN6_BSD_SLEEP_INDICATOR
,
1994 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1996 /* Now that the ring is fully powered up, update the tail */
1997 i9xx_submit_request(request
);
1999 /* Let the ring send IDLE messages to the GT again,
2000 * and so let it sleep to conserve power when idle.
2002 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL
,
2003 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE
));
2005 intel_uncore_forcewake_put(dev_priv
, FORCEWAKE_ALL
);
2008 static int mi_flush_dw(struct i915_request
*rq
, u32 flags
)
2012 cs
= intel_ring_begin(rq
, 4);
2019 * We always require a command barrier so that subsequent
2020 * commands, such as breadcrumb interrupts, are strictly ordered
2021 * wrt the contents of the write cache being flushed to memory
2022 * (and thus being coherent from the CPU).
2024 cmd
|= MI_FLUSH_DW_STORE_INDEX
| MI_FLUSH_DW_OP_STOREDW
;
2027 * Bspec vol 1c.3 - blitter engine command streamer:
2028 * "If ENABLED, all TLBs will be invalidated once the flush
2029 * operation is complete. This bit is only valid when the
2030 * Post-Sync Operation field is a value of 1h or 3h."
2035 *cs
++ = I915_GEM_HWS_SCRATCH_ADDR
| MI_FLUSH_DW_USE_GTT
;
2039 intel_ring_advance(rq
, cs
);
static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}
2055 hsw_emit_bb_start(struct i915_request
*rq
,
2056 u64 offset
, u32 len
,
2057 unsigned int dispatch_flags
)
2061 cs
= intel_ring_begin(rq
, 2);
2065 *cs
++ = MI_BATCH_BUFFER_START
| (dispatch_flags
& I915_DISPATCH_SECURE
?
2066 0 : MI_BATCH_PPGTT_HSW
| MI_BATCH_NON_SECURE_HSW
);
2067 /* bit0-7 is the length on GEN6+ */
2069 intel_ring_advance(rq
, cs
);
2075 gen6_emit_bb_start(struct i915_request
*rq
,
2076 u64 offset
, u32 len
,
2077 unsigned int dispatch_flags
)
2081 cs
= intel_ring_begin(rq
, 2);
2085 *cs
++ = MI_BATCH_BUFFER_START
| (dispatch_flags
& I915_DISPATCH_SECURE
?
2086 0 : MI_BATCH_NON_SECURE_I965
);
2087 /* bit0-7 is the length on GEN6+ */
2089 intel_ring_advance(rq
, cs
);
/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
2101 static void intel_ring_init_semaphores(struct drm_i915_private
*dev_priv
,
2102 struct intel_engine_cs
*engine
)
2106 if (!HAS_LEGACY_SEMAPHORES(dev_priv
))
2109 GEM_BUG_ON(INTEL_GEN(dev_priv
) < 6);
2110 engine
->semaphore
.sync_to
= gen6_ring_sync_to
;
2111 engine
->semaphore
.signal
= gen6_signal
;
2114 * The current semaphore is only applied on pre-gen8
2115 * platform. And there is no VCS2 ring on the pre-gen8
2116 * platform. So the semaphore between RCS and VCS2 is
2117 * initialized as INVALID.
2119 for (i
= 0; i
< GEN6_NUM_SEMAPHORES
; i
++) {
2120 static const struct {
2122 i915_reg_t mbox_reg
;
2123 } sem_data
[GEN6_NUM_SEMAPHORES
][GEN6_NUM_SEMAPHORES
] = {
2125 [VCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_RV
, .mbox_reg
= GEN6_VRSYNC
},
2126 [BCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_RB
, .mbox_reg
= GEN6_BRSYNC
},
2127 [VECS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_RVE
, .mbox_reg
= GEN6_VERSYNC
},
2130 [RCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VR
, .mbox_reg
= GEN6_RVSYNC
},
2131 [BCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VB
, .mbox_reg
= GEN6_BVSYNC
},
2132 [VECS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VVE
, .mbox_reg
= GEN6_VEVSYNC
},
2135 [RCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_BR
, .mbox_reg
= GEN6_RBSYNC
},
2136 [VCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_BV
, .mbox_reg
= GEN6_VBSYNC
},
2137 [VECS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_BVE
, .mbox_reg
= GEN6_VEBSYNC
},
2140 [RCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VER
, .mbox_reg
= GEN6_RVESYNC
},
2141 [VCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VEV
, .mbox_reg
= GEN6_VVESYNC
},
2142 [BCS_HW
] = { .wait_mbox
= MI_SEMAPHORE_SYNC_VEB
, .mbox_reg
= GEN6_BVESYNC
},
2146 i915_reg_t mbox_reg
;
2148 if (i
== engine
->hw_id
) {
2149 wait_mbox
= MI_SEMAPHORE_SYNC_INVALID
;
2150 mbox_reg
= GEN6_NOSYNC
;
2152 wait_mbox
= sem_data
[engine
->hw_id
][i
].wait_mbox
;
2153 mbox_reg
= sem_data
[engine
->hw_id
][i
].mbox_reg
;
2156 engine
->semaphore
.mbox
.wait
[i
] = wait_mbox
;
2157 engine
->semaphore
.mbox
.signal
[i
] = mbox_reg
;
2161 static void intel_ring_init_irq(struct drm_i915_private
*dev_priv
,
2162 struct intel_engine_cs
*engine
)
2164 if (INTEL_GEN(dev_priv
) >= 6) {
2165 engine
->irq_enable
= gen6_irq_enable
;
2166 engine
->irq_disable
= gen6_irq_disable
;
2167 engine
->irq_seqno_barrier
= gen6_seqno_barrier
;
2168 } else if (INTEL_GEN(dev_priv
) >= 5) {
2169 engine
->irq_enable
= gen5_irq_enable
;
2170 engine
->irq_disable
= gen5_irq_disable
;
2171 engine
->irq_seqno_barrier
= gen5_seqno_barrier
;
2172 } else if (INTEL_GEN(dev_priv
) >= 3) {
2173 engine
->irq_enable
= i9xx_irq_enable
;
2174 engine
->irq_disable
= i9xx_irq_disable
;
2176 engine
->irq_enable
= i8xx_irq_enable
;
2177 engine
->irq_disable
= i8xx_irq_disable
;
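/* Legacy ringbuffer submission: requests are submitted by writing the new
 * tail straight into the ring registers; there is no execlists port. */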
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}
2196 static void intel_ring_default_vfuncs(struct drm_i915_private
*dev_priv
,
2197 struct intel_engine_cs
*engine
)
2199 /* gen8+ are only supported with execlists */
2200 GEM_BUG_ON(INTEL_GEN(dev_priv
) >= 8);
2202 intel_ring_init_irq(dev_priv
, engine
);
2203 intel_ring_init_semaphores(dev_priv
, engine
);
2205 engine
->init_hw
= init_ring_common
;
2206 engine
->reset
.prepare
= reset_prepare
;
2207 engine
->reset
.reset
= reset_ring
;
2208 engine
->reset
.finish
= reset_finish
;
2210 engine
->context_pin
= intel_ring_context_pin
;
2211 engine
->request_alloc
= ring_request_alloc
;
2213 engine
->emit_breadcrumb
= i9xx_emit_breadcrumb
;
2214 engine
->emit_breadcrumb_sz
= i9xx_emit_breadcrumb_sz
;
2215 if (HAS_LEGACY_SEMAPHORES(dev_priv
)) {
2218 engine
->emit_breadcrumb
= gen6_sema_emit_breadcrumb
;
2220 num_rings
= INTEL_INFO(dev_priv
)->num_rings
- 1;
2221 engine
->emit_breadcrumb_sz
+= num_rings
* 3;
2223 engine
->emit_breadcrumb_sz
++;
2226 engine
->set_default_submission
= i9xx_set_default_submission
;
2228 if (INTEL_GEN(dev_priv
) >= 6)
2229 engine
->emit_bb_start
= gen6_emit_bb_start
;
2230 else if (INTEL_GEN(dev_priv
) >= 4)
2231 engine
->emit_bb_start
= i965_emit_bb_start
;
2232 else if (IS_I830(dev_priv
) || IS_I845G(dev_priv
))
2233 engine
->emit_bb_start
= i830_emit_bb_start
;
2235 engine
->emit_bb_start
= i915_emit_bb_start
;
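/*
 * Per-engine constructors: start from the common legacy vfuncs, then override
 * the flush, interrupt and batch-start hooks for the engine class and
 * generation before allocating and pinning the ring itself.
 */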
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 6) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		if (IS_GEN6(dev_priv))
			engine->emit_flush = gen6_render_ring_flush;
	} else if (IS_GEN5(dev_priv)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	return 0;
}
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev_priv))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN5(dev_priv))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	return intel_init_ring_buffer(engine);
}
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	return intel_init_ring_buffer(engine);
}