/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things to the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use them. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_mocs.h"

#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define CTX_LRI_HEADER_0		0x01
#define CTX_CONTEXT_CONTROL		0x02
#define CTX_RING_HEAD			0x04
#define CTX_RING_TAIL			0x06
#define CTX_RING_BUFFER_START		0x08
#define CTX_RING_BUFFER_CONTROL		0x0a
#define CTX_BB_HEAD_U			0x0c
#define CTX_BB_HEAD_L			0x0e
#define CTX_BB_STATE			0x10
#define CTX_SECOND_BB_HEAD_U		0x12
#define CTX_SECOND_BB_HEAD_L		0x14
#define CTX_SECOND_BB_STATE		0x16
#define CTX_BB_PER_CTX_PTR		0x18
#define CTX_RCS_INDIRECT_CTX		0x1a
#define CTX_RCS_INDIRECT_CTX_OFFSET	0x1c
#define CTX_LRI_HEADER_1		0x21
#define CTX_CTX_TIMESTAMP		0x22
#define CTX_PDP3_UDW			0x24
#define CTX_PDP3_LDW			0x26
#define CTX_PDP2_UDW			0x28
#define CTX_PDP2_LDW			0x2a
#define CTX_PDP1_UDW			0x2c
#define CTX_PDP1_LDW			0x2e
#define CTX_PDP0_UDW			0x30
#define CTX_PDP0_LDW			0x32
#define CTX_LRI_HEADER_2		0x41
#define CTX_R_PWR_CLK_STATE		0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS	0x44

#define GEN8_CTX_VALID (1<<0)
#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
#define GEN8_CTX_FORCE_RESTORE (1<<2)
#define GEN8_CTX_L3LLC_COHERENT (1<<5)
#define GEN8_CTX_PRIVILEGE (1<<8)

#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
	(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
	(reg_state)[(pos)+1] = (val); \
} while (0)

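/*
 * Note: ASSIGN_CTX_PDP and ASSIGN_CTX_PML4 only refresh the value dword
 * (+1) of each register pair; the register offsets at +0 are assumed to
 * have been written already when the context image was populated.
 */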
#define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \
	const u64 _addr = i915_page_dir_dma_addr((ppgtt), (n)); \
	reg_state[CTX_PDP ## n ## _UDW+1] = upper_32_bits(_addr); \
	reg_state[CTX_PDP ## n ## _LDW+1] = lower_32_bits(_addr); \
} while (0)

#define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \
	reg_state[CTX_PDP0_UDW + 1] = upper_32_bits(px_dma(&ppgtt->pml4)); \
	reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)

enum {
	FAULT_AND_HANG = 0,
	FAULT_AND_HALT, /* Debug only */
	FAULT_AND_STREAM,
	FAULT_AND_CONTINUE /* Unsupported */
};
#define GEN8_CTX_ID_SHIFT 32
#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT	0x26

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine);

/**
 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
 * @dev_priv: i915 device private
 * @enable_execlists: value of i915.enable_execlists module parameter.
 *
 * Only certain platforms support Execlists (the prerequisites being
 * support for Logical Ring Contexts and Aliasing PPGTT or better).
 *
 * Return: 1 if Execlists is supported and has to be enabled.
 */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
{
	/* On platforms with execlist available, vGPU will only
	 * support execlist mode, no ring buffer mode.
	 */
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
		return 1;

	if (INTEL_GEN(dev_priv) >= 9)
		return 1;

	if (enable_execlists == 0)
		return 0;

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
	    USES_PPGTT(dev_priv) &&
	    i915.use_mmio_flip >= 0)
		return 1;

	return 0;
}

static void
logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
		engine->idle_lite_restore_wa = ~0;

	engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
					IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
					(engine->id == VCS || engine->id == VCS2);

	engine->ctx_desc_template = GEN8_CTX_VALID;
	if (IS_GEN8(dev_priv))
		engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
	engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers */
	/* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */

	/* WaEnableForceRestoreInCtxtDescForVCS:skl */
	/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
	if (engine->disable_lite_restore_wa)
		engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}

/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *	bits  0-11:	flags, GEN8_CTX_* (cached in ctx_desc_template)
 *	bits 12-31:	LRCA, GTT address of (the HWSP of) this context
 *	bits 32-52:	ctx ID, a globally unique tag
 *	bits 53-54:	mbz, reserved for use by hardware
 *	bits 55-63:	group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;			/* bits  3-4  */
	desc |= engine->ctx_desc_template;		/* bits  0-11 */
	desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
							/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */

	ce->lrc_desc = desc;
}

uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine)
{
	return ctx->engine[engine->id].lrc_desc;
}

static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
				 struct drm_i915_gem_request *rq1)
{
	struct intel_engine_cs *engine = rq0->engine;
	struct drm_i915_private *dev_priv = rq0->i915;
	uint64_t desc[2];

	if (rq1) {
		desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
		rq1->elsp_submitted++;
	} else {
		desc[1] = 0;
	}

	desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
	rq0->elsp_submitted++;

	/* You must always write both descriptors in the order below. */
	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));

	I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
	/* The context is automatically loaded after the following */
	I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));

	/* ELSP is a write-only register, use another nearby reg for posting */
	POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}

static void
execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
	ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
}

static void execlists_update_context(struct drm_i915_gem_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;

	reg_state[CTX_RING_TAIL+1] = rq->tail;

	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
	if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		execlists_update_context_pdps(ppgtt, reg_state);
}

static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
				      struct drm_i915_gem_request *rq1)
{
	struct drm_i915_private *dev_priv = rq0->i915;
	unsigned int fw_domains = rq0->engine->fw_domains;

	execlists_update_context(rq0);

	if (rq1)
		execlists_update_context(rq1);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	execlists_elsp_write(rq0, rq1);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);
}

static inline void execlists_context_status_change(
		struct drm_i915_gem_request *rq,
		unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead-code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
}

static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
	struct drm_i915_gem_request *cursor, *tmp;

	assert_spin_locked(&engine->execlist_lock);

	/*
	 * If irqs are not active generate a warning as batches that finish
	 * without the irqs may get lost and a GPU Hang may occur.
	 */
	WARN_ON(!intel_irqs_enabled(engine->i915));

	/* Try to read in pairs */
	list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
				 execlist_link) {
		if (!req0) {
			req0 = cursor;
		} else if (req0->ctx == cursor->ctx) {
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
			cursor->elsp_submitted = req0->elsp_submitted;
			list_del(&req0->execlist_link);
			i915_gem_request_put(req0);
			req0 = cursor;
		} else {
			if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
				/*
				 * req0 (after merged) ctx requires single
				 * submission, stop picking
				 */
				if (req0->ctx->execlists_force_single_submission)
					break;
				/*
				 * req0 ctx doesn't require single submission,
				 * but next req ctx requires, stop picking
				 */
				if (cursor->ctx->execlists_force_single_submission)
					break;
			}
			req1 = cursor;
			WARN_ON(req1->elsp_submitted);
			break;
		}
	}

	if (unlikely(!req0))
		return;

	execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);

	if (req1)
		execlists_context_status_change(req1,
						INTEL_CONTEXT_SCHEDULE_IN);

	if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
		/*
		 * WaIdleLiteRestore: make sure we never cause a lite restore
		 * with HEAD==TAIL.
		 *
		 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
		 * resubmit the request. See gen8_emit_request() for where we
		 * prepare the padding after the end of the request.
		 */
		struct intel_ringbuffer *ringbuf;

		ringbuf = req0->ctx->engine[engine->id].ringbuf;
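		/*
		 * Bump the tail past the two MI_NOOPs of padding (2 dwords,
		 * 8 bytes) prepared after the request end, so the resubmitted
		 * context never presents HEAD == TAIL to the hardware.
		 */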
		req0->tail += 8;
		req0->tail &= ringbuf->size - 1;
	}

	execlists_submit_requests(req0, req1);
}

static unsigned int
execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
{
	struct drm_i915_gem_request *head_req;

	assert_spin_locked(&engine->execlist_lock);

	head_req = list_first_entry_or_null(&engine->execlist_queue,
					    struct drm_i915_gem_request,
					    execlist_link);

	if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
		return 0;

	WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");

	if (--head_req->elsp_submitted > 0)
		return 0;

	execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);

	list_del(&head_req->execlist_link);
	i915_gem_request_put(head_req);

	return 1;
}

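/*
 * Read one CSB entry: returns 0 for an idle-to-active event (nothing to
 * retire), otherwise the status dword, with *context_id taken from the
 * entry's high dword.
 */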
static u32
get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
		   u32 *context_id)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status;

	read_pointer %= GEN8_CSB_ENTRIES;

	status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));

	if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
		return 0;

	*context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
							      read_pointer));

	return status;
}

/*
 * Check the unread Context Status Buffers and manage the submission of new
 * contexts to the ELSP accordingly.
 */
static void intel_lrc_irq_handler(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct drm_i915_private *dev_priv = engine->i915;
	u32 status_pointer;
	unsigned int read_pointer, write_pointer;
	u32 csb[GEN8_CSB_ENTRIES][2];
	unsigned int csb_read = 0, i;
	unsigned int submit_contexts = 0;

	intel_uncore_forcewake_get(dev_priv, engine->fw_domains);

	status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));

	read_pointer = engine->next_context_status_buffer;
	write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
	if (read_pointer > write_pointer)
		write_pointer += GEN8_CSB_ENTRIES;

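	/*
	 * Snapshot the pending CSB events into the local csb[] array while
	 * forcewake is held; they are processed further below, under the
	 * execlist lock.
	 */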
	while (read_pointer < write_pointer) {
		if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
			break;
		csb[csb_read][0] = get_context_status(engine, ++read_pointer,
						      &csb[csb_read][1]);
		csb_read++;
	}

	engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;

	/* Update the read pointer to the old write pointer. Manual ringbuffer
	 * management ftw </sarcasm> */
	I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
		      _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
				    engine->next_context_status_buffer << 8));

	intel_uncore_forcewake_put(dev_priv, engine->fw_domains);

	spin_lock(&engine->execlist_lock);

	for (i = 0; i < csb_read; i++) {
		if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
			if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
				if (execlists_check_remove_request(engine, csb[i][1]))
					WARN(1, "Lite Restored request removed from queue\n");
			} else
				WARN(1, "Preemption without Lite Restore\n");
		}

		if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
		    GEN8_CTX_STATUS_ELEMENT_SWITCH))
			submit_contexts +=
				execlists_check_remove_request(engine, csb[i][1]);
	}

	if (submit_contexts) {
		if (!engine->disable_lite_restore_wa ||
		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
			execlists_context_unqueue(engine);
	}

	spin_unlock(&engine->execlist_lock);

	if (unlikely(submit_contexts > 2))
		DRM_ERROR("More than two context complete events?\n");
}

static void execlists_context_queue(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct drm_i915_gem_request *cursor;
	int num_elements = 0;

	spin_lock_bh(&engine->execlist_lock);

	list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
		if (++num_elements > 2)
			break;

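	/*
	 * With more than two requests queued, the incoming request supersedes
	 * a not-yet-submitted tail request from the same context: the new
	 * tail covers the old request's work, so the old one is dropped.
	 */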
	if (num_elements > 2) {
		struct drm_i915_gem_request *tail_req;

		tail_req = list_last_entry(&engine->execlist_queue,
					   struct drm_i915_gem_request,
					   execlist_link);

		if (request->ctx == tail_req->ctx) {
			WARN(tail_req->elsp_submitted != 0,
				"More than 2 already-submitted reqs queued\n");
			list_del(&tail_req->execlist_link);
			i915_gem_request_put(tail_req);
		}
	}

	i915_gem_request_get(request);
	list_add_tail(&request->execlist_link, &engine->execlist_queue);
	request->ctx_hw_id = request->ctx->hw_id;
	if (num_elements == 0)
		execlists_context_unqueue(engine);

	spin_unlock_bh(&engine->execlist_lock);
}

static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (engine->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
				 struct list_head *vmas)
{
	const unsigned other_rings = ~intel_engine_flag(req->engine);
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (obj->active & other_rings) {
			ret = i915_gem_object_sync(obj, req->engine, &req);
			if (ret)
				return ret;
		}

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return logical_ring_invalidate_all_caches(req);
}

int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_context *ce = &request->ctx->engine[engine->id];
	int ret;

	/* Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;

	if (!ce->state) {
		ret = execlists_context_deferred_alloc(request->ctx, engine);
		if (ret)
			return ret;
	}

	request->ring = ce->ringbuf;

	if (i915.enable_guc_submission) {
		/*
		 * Check that the GuC has space for the request before
		 * going any further, as the i915_add_request() call
		 * later on mustn't fail ...
		 */
		ret = i915_guc_wq_check_space(request);
		if (ret)
			return ret;
	}

	ret = intel_lr_context_pin(request->ctx, engine);
	if (ret)
		return ret;

	ret = intel_ring_begin(request, 0);
	if (ret)
		goto err_unpin;

	if (!ce->initialised) {
		ret = engine->init_context(request);
		if (ret)
			goto err_unpin;

		ce->initialised = true;
	}

	/* Note that after this point, we have committed to using
	 * this request as it is being used to both track the
	 * state of engine initialisation and liveness of the
	 * golden renderstate above. Think twice before you try
	 * to cancel/unwind this request now.
	 */

	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;

err_unpin:
	intel_lr_context_unpin(request->ctx, engine);
	return ret;
}

/*
 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
 * @request: Request to advance the logical ringbuffer of.
 *
 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
 * really happens during submission is that the context and current tail will be placed
 * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
static int
intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
	struct intel_ringbuffer *ring = request->ring;
	struct intel_engine_cs *engine = request->engine;

	intel_ring_advance(ring);
	request->tail = ring->tail;

	/*
	 * Here we add two extra NOOPs as padding to avoid
	 * lite restore of a context with HEAD==TAIL.
	 *
	 * Caller must reserve WA_TAIL_DWORDS for us!
	 */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* We keep the previous context alive until we retire the following
	 * request. This ensures that the context object is still pinned
	 * for any residual writes the HW makes into it on the context switch
	 * into the next object following the breadcrumb. Otherwise, we may
	 * retire the context too early.
	 */
	request->previous_context = engine->last_context;
	engine->last_context = request->ctx;

	if (i915.enable_guc_submission)
		i915_guc_submit(request);
	else
		execlists_context_queue(request);

	return 0;
}

/**
 * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
 * @params: execbuffer call parameters.
 * @args: execbuffer call arguments.
 * @vmas: list of vmas.
 *
 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
 * away the submission details of the execbuffer ioctl call.
 *
 * Return: non-zero if the submission fails.
 */
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas)
{
	struct drm_device *dev = params->dev;
	struct intel_engine_cs *engine = params->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_ringbuffer *ring = params->request->ring;
	u64 exec_start;
	int instp_mode;
	u32 instp_mask;
	int ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		DRM_DEBUG("sol reset is gen7 only\n");
		return -EINVAL;
	}

	ret = execlists_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	if (engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(params->request, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	exec_start = params->batch_obj_vm_offset +
		     args->batch_start_offset;

	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *req, *tmp;
	LIST_HEAD(cancel_list);

	WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));

	spin_lock_bh(&engine->execlist_lock);
	list_replace_init(&engine->execlist_queue, &cancel_list);
	spin_unlock_bh(&engine->execlist_lock);

	list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
		list_del(&req->execlist_link);
		i915_gem_request_put(req);
	}
}

void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	if (!intel_engine_initialized(engine))
		return;

	ret = intel_engine_idle(engine);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  engine->name, ret);

	/* TODO: Is this correct with Execlists enabled? */
	I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
	if (intel_wait_for_register(dev_priv,
				    RING_MI_MODE(engine->mmio_base),
				    MODE_IDLE, MODE_IDLE,
				    1000)) {
		DRM_ERROR("%s: timed out trying to stop ring\n", engine->name);
		return;
	}
	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	int ret;

	if (!engine->gpu_caches_dirty)
		return 0;

	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	engine->gpu_caches_dirty = false;
	return 0;
}

static int intel_lr_context_pin(struct i915_gem_context *ctx,
				struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = ctx->i915;
	struct intel_context *ce = &ctx->engine[engine->id];
	void *vaddr;
	u32 *lrc_reg_state;
	int ret;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);

	if (ce->pin_count++)
		return 0;

	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(ce->state);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto unpin_ctx_obj;
	}

	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;

	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
	if (ret)
		goto unpin_map;

	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
	intel_lr_context_descriptor_update(ctx, engine);

	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
	ce->lrc_reg_state = lrc_reg_state;
	ce->state->dirty = true;

	/* Invalidate GuC TLB. */
	if (i915.enable_guc_submission)
		I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);

	i915_gem_context_get(ctx);
	return 0;

unpin_map:
	i915_gem_object_unpin_map(ce->state);
unpin_ctx_obj:
	i915_gem_object_ggtt_unpin(ce->state);
err:
	ce->pin_count = 0;
	return ret;
}

void intel_lr_context_unpin(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(ce->pin_count == 0);

	if (--ce->pin_count)
		return;

	intel_unpin_ringbuffer_obj(ce->ringbuf);

	i915_gem_object_unpin_map(ce->state);
	i915_gem_object_ggtt_unpin(ce->state);

	ce->lrc_vma = NULL;
	ce->lrc_desc = 0;
	ce->lrc_reg_state = NULL;

	i915_gem_context_put(ctx);
}

static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *engine = req->engine;
	struct intel_ringbuffer *ring = req->ring;
	struct i915_workarounds *w = &req->i915->workarounds;

	if (w->count == 0)
		return 0;

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, w->count * 2 + 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	engine->gpu_caches_dirty = true;
	ret = logical_ring_flush_all_caches(req);
	if (ret)
		return ret;

	return 0;
}

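/*
 * Emit one dword into the WA batch page. Note that on overflow this
 * returns -ENOSPC from the *calling* function.
 */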
#define wa_ctx_emit(batch, index, cmd)					\
	do {								\
		int __index = (index)++;				\
		if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
			return -ENOSPC;					\
		}							\
		batch[__index] = (cmd);					\
	} while (0)

#define wa_ctx_emit_reg(batch, index, reg) \
	wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))

1077/*
1078 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1079 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1080 * but there is a slight complication as this is applied in WA batch where the
1081 * values are only initialized once so we cannot take register value at the
1082 * beginning and reuse it further; hence we save its value to memory, upload a
1083 * constant value with bit21 set and then we restore it back with the saved value.
1084 * To simplify the WA, a constant value is formed by using the default value
1085 * of this register. This shouldn't be a problem because we are only modifying
1086 * it for a short period and this batch in non-premptible. We can ofcourse
1087 * use additional instructions that read the actual value of the register
1088 * at that time and set our bit of interest but it makes the WA complicated.
1089 *
1090 * This WA is also required for Gen9 so extracting as a function avoids
1091 * code duplication.
1092 */
0bc40be8 1093static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
6e5248b5 1094 uint32_t *batch,
9e000847
AS
1095 uint32_t index)
1096{
1097 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1098
a4106a78 1099 /*
fe905819 1100 * WaDisableLSQCROPERFforOCL:skl,kbl
a4106a78
AS
1101 * This WA is implemented in skl_init_clock_gating() but since
1102 * this batch updates GEN8_L3SQCREG4 with default value we need to
1103 * set this bit here to retain the WA during flush.
1104 */
fe905819
MK
1105 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_E0) ||
1106 IS_KBL_REVID(engine->i915, 0, KBL_REVID_E0))
a4106a78
AS
1107 l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
1108
f1afe24f 1109 wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
83b8a982 1110 MI_SRM_LRM_GLOBAL_GTT));
8f40db77 1111 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
0bc40be8 1112 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
83b8a982
AS
1113 wa_ctx_emit(batch, index, 0);
1114
1115 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
8f40db77 1116 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
83b8a982
AS
1117 wa_ctx_emit(batch, index, l3sqc4_flush);
1118
1119 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1120 wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
1121 PIPE_CONTROL_DC_FLUSH_ENABLE));
1122 wa_ctx_emit(batch, index, 0);
1123 wa_ctx_emit(batch, index, 0);
1124 wa_ctx_emit(batch, index, 0);
1125 wa_ctx_emit(batch, index, 0);
1126
f1afe24f 1127 wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
83b8a982 1128 MI_SRM_LRM_GLOBAL_GTT));
8f40db77 1129 wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
0bc40be8 1130 wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
83b8a982 1131 wa_ctx_emit(batch, index, 0);
9e000847
AS
1132
1133 return index;
1134}
1135
static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t offset,
				    uint32_t start_alignment)
{
	return wa_ctx->offset = ALIGN(offset, start_alignment);
}

static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
			     uint32_t offset,
			     uint32_t size_alignment)
{
	wa_ctx->size = offset - wa_ctx->offset;

	WARN(wa_ctx->size % size_alignment,
	     "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
	     wa_ctx->size, size_alignment);
	return 0;
}

/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on a criteria. At the moment this batch always starts at the beginning of the
 * page and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDS written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
 * make a complete batch buffer.
 */
static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
				    struct i915_wa_ctx_bb *wa_ctx,
				    uint32_t *batch,
				    uint32_t *offset)
{
	uint32_t scratch_addr;
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaDisableCtxRestoreArbitration:bdw,chv */
	wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);

	/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
	if (IS_BROADWELL(engine->i915)) {
		int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
		if (rc < 0)
			return rc;
		index = rc;
	}

	/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
	/* Actual scratch location is at 128 bytes offset */
	scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;

	wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
	wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
				   PIPE_CONTROL_GLOBAL_GTT_IVB |
				   PIPE_CONTROL_CS_STALL |
				   PIPE_CONTROL_QW_WRITE));
	wa_ctx_emit(batch, index, scratch_addr);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);
	wa_ctx_emit(batch, index, 0);

	/* Pad to end of cacheline */
	while (index % CACHELINE_DWORDS)
		wa_ctx_emit(batch, index, MI_NOOP);

	/*
	 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
	 * execution depends on the length specified in terms of cache lines
	 * in the register CTX_RCS_INDIRECT_CTX
	 */

	return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}

1215
6e5248b5
DV
1216/*
1217 * This batch is started immediately after indirect_ctx batch. Since we ensure
1218 * that indirect_ctx ends on a cacheline this batch is aligned automatically.
17ee950d 1219 *
6e5248b5 1220 * The number of DWORDS written are returned using this field.
17ee950d
AS
1221 *
1222 * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
1223 * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
1224 */
0bc40be8 1225static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
17ee950d 1226 struct i915_wa_ctx_bb *wa_ctx,
6e5248b5 1227 uint32_t *batch,
17ee950d
AS
1228 uint32_t *offset)
1229{
1230 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1231
7ad00d1a 1232 /* WaDisableCtxRestoreArbitration:bdw,chv */
83b8a982 1233 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
7ad00d1a 1234
83b8a982 1235 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
17ee950d
AS
1236
1237 return wa_ctx_end(wa_ctx, *offset = index, 1);
1238}
1239
0bc40be8 1240static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
0504cffc 1241 struct i915_wa_ctx_bb *wa_ctx,
6e5248b5 1242 uint32_t *batch,
0504cffc
AS
1243 uint32_t *offset)
1244{
a4106a78 1245 int ret;
0504cffc
AS
1246 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1247
0907c8f7 1248 /* WaDisableCtxRestoreArbitration:skl,bxt */
c033666a
CW
1249 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1250 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
0907c8f7 1251 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
0504cffc 1252
a4106a78 1253 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
0bc40be8 1254 ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
a4106a78
AS
1255 if (ret < 0)
1256 return ret;
1257 index = ret;
1258
873e8171
MK
1259 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
1260 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1261 wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
1262 wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
1263 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
1264 wa_ctx_emit(batch, index, MI_NOOP);
1265
066d4628
MK
1266 /* WaClearSlmSpaceAtContextSwitch:kbl */
1267 /* Actual scratch location is at 128 bytes offset */
1268 if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1269 uint32_t scratch_addr
1270 = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
1271
1272 wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
1273 wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
1274 PIPE_CONTROL_GLOBAL_GTT_IVB |
1275 PIPE_CONTROL_CS_STALL |
1276 PIPE_CONTROL_QW_WRITE));
1277 wa_ctx_emit(batch, index, scratch_addr);
1278 wa_ctx_emit(batch, index, 0);
1279 wa_ctx_emit(batch, index, 0);
1280 wa_ctx_emit(batch, index, 0);
1281 }
3485d99e
TG
1282
1283 /* WaMediaPoolStateCmdInWABB:bxt */
1284 if (HAS_POOLED_EU(engine->i915)) {
1285 /*
1286 * EU pool configuration is setup along with golden context
1287 * during context initialization. This value depends on
1288 * device type (2x6 or 3x6) and needs to be updated based
1289 * on which subslice is disabled especially for 2x6
1290 * devices, however it is safe to load default
1291 * configuration of 3x6 device instead of masking off
1292 * corresponding bits because HW ignores bits of a disabled
1293 * subslice and drops down to appropriate config. Please
1294 * see render_state_setup() in i915_gem_render_state.c for
1295 * possible configurations, to avoid duplication they are
1296 * not shown here again.
1297 */
1298 u32 eu_pool_config = 0x00777000;
1299 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1300 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1301 wa_ctx_emit(batch, index, eu_pool_config);
1302 wa_ctx_emit(batch, index, 0);
1303 wa_ctx_emit(batch, index, 0);
1304 wa_ctx_emit(batch, index, 0);
1305 }
1306
0504cffc
AS
1307 /* Pad to end of cacheline */
1308 while (index % CACHELINE_DWORDS)
1309 wa_ctx_emit(batch, index, MI_NOOP);
1310
1311 return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
1312}
1313
static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
			       struct i915_wa_ctx_bb *wa_ctx,
			       uint32_t *batch,
			       uint32_t *offset)
{
	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
		wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
		wa_ctx_emit(batch, index,
			    _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaClearTdlStateAckDirtyBits:bxt */
	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
		wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));

		wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
		wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));

		wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
		/* dummy write to CS, mask bits are 0 to ensure the register is not modified */
		wa_ctx_emit(batch, index, 0x0);
		wa_ctx_emit(batch, index, MI_NOOP);
	}

	/* WaDisableCtxRestoreArbitration:skl,bxt */
	if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
	    IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
		wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);

	wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);

	return wa_ctx_end(wa_ctx, *offset = index, 1);
}

static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
	int ret;

	engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
						    PAGE_ALIGN(size));
	if (IS_ERR(engine->wa_ctx.obj)) {
		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
		ret = PTR_ERR(engine->wa_ctx.obj);
		engine->wa_ctx.obj = NULL;
		return ret;
	}

	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
	if (ret) {
		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
				 ret);
		i915_gem_object_put(engine->wa_ctx.obj);
		return ret;
	}

	return 0;
}

static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
	if (engine->wa_ctx.obj) {
		i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
		i915_gem_object_put(engine->wa_ctx.obj);
		engine->wa_ctx.obj = NULL;
	}
}

static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
	int ret;
	uint32_t *batch;
	uint32_t offset;
	struct page *page;
	struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;

	WARN_ON(engine->id != RCS);

	/* update this when WA for higher Gen are added */
	if (INTEL_GEN(engine->i915) > 9) {
		DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
			  INTEL_GEN(engine->i915));
		return 0;
	}

	/* some WA perform writes to scratch page, ensure it is valid */
	if (engine->scratch.obj == NULL) {
		DRM_ERROR("scratch page not allocated for %s\n", engine->name);
		return -EINVAL;
	}

	ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
		return ret;
	}

	page = i915_gem_object_get_dirty_page(wa_ctx->obj, 0);
	batch = kmap_atomic(page);
	offset = 0;

	if (IS_GEN8(engine->i915)) {
		ret = gen8_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen8_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	} else if (IS_GEN9(engine->i915)) {
		ret = gen9_init_indirectctx_bb(engine,
					       &wa_ctx->indirect_ctx,
					       batch,
					       &offset);
		if (ret)
			goto out;

		ret = gen9_init_perctx_bb(engine,
					  &wa_ctx->per_ctx,
					  batch,
					  &offset);
		if (ret)
			goto out;
	}

out:
	kunmap_atomic(batch);
	if (ret)
		lrc_destroy_wa_ctx_obj(engine);

	return ret;
}

static void lrc_init_hws(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE(RING_HWS_PGA(engine->mmio_base),
		   (u32)engine->status_page.gfx_addr);
	POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}

0bc40be8 1473static int gen8_init_common_ring(struct intel_engine_cs *engine)
9b1136d5 1474{
c033666a 1475 struct drm_i915_private *dev_priv = engine->i915;
c6a2ac71 1476 unsigned int next_context_status_buffer_hw;
9b1136d5 1477
04794adb 1478 lrc_init_hws(engine);
e84fe803 1479
0bc40be8
TU
1480 I915_WRITE_IMR(engine,
1481 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1482 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
73d477f6 1483
0bc40be8 1484 I915_WRITE(RING_MODE_GEN7(engine),
9b1136d5
OM
1485 _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
1486 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
0bc40be8 1487 POSTING_READ(RING_MODE_GEN7(engine));
dfc53c5e
MT
1488
1489 /*
1490 * Instead of resetting the Context Status Buffer (CSB) read pointer to
1491 * zero, we need to read the write pointer from hardware and use its
1492 * value because "this register is power context save restored".
1493 * Effectively, these states have been observed:
1494 *
1495 * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
1496 * BDW | CSB regs not reset | CSB regs reset |
1497 * CHT | CSB regs not reset | CSB regs not reset |
5590a5f0
BW
1498 * SKL | ? | ? |
1499 * BXT | ? | ? |
dfc53c5e 1500 */
5590a5f0 1501 next_context_status_buffer_hw =
0bc40be8 1502 GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
dfc53c5e
MT
1503
1504 /*
1505 * When the CSB registers are reset (also after power-up / gpu reset),
1506 * CSB write pointer is set to all 1's, which is not valid; use '5' in
1507 * this special case so that the first element read is CSB[0].
1508 */
1509 if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
1510 next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
1511
0bc40be8
TU
1512 engine->next_context_status_buffer = next_context_status_buffer_hw;
1513 DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
9b1136d5 1514
fc0768ce 1515 intel_engine_init_hangcheck(engine);
9b1136d5 1516
0ccdacf6 1517 return intel_mocs_init_engine(engine);
9b1136d5
OM
1518}
1519
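/*
 * Editor's note: a minimal illustrative sketch (not part of the driver) of
 * the CSB write-pointer normalisation done in gen8_init_common_ring() above;
 * csb_next_from_hw() is a hypothetical helper, assuming the GEN8_CSB_*
 * macros from intel_lrc.h.
 */
static inline unsigned int csb_next_from_hw(u32 csb_ptr_reg)
{
	unsigned int next = GEN8_CSB_WRITE_PTR(csb_ptr_reg);

	/* All 1's means the CSB registers were freshly reset. */
	if (next == GEN8_CSB_PTR_MASK)
		next = GEN8_CSB_ENTRIES - 1;

	return next;
}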
0bc40be8 1520static int gen8_init_render_ring(struct intel_engine_cs *engine)
9b1136d5 1521{
c033666a 1522 struct drm_i915_private *dev_priv = engine->i915;
9b1136d5
OM
1523 int ret;
1524
0bc40be8 1525 ret = gen8_init_common_ring(engine);
9b1136d5
OM
1526 if (ret)
1527 return ret;
1528
1529 /* We need to disable the AsyncFlip performance optimisations in order
1530 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1531 * programmed to '1' on all products.
1532 *
1533 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1534 */
1535 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1536
9b1136d5
OM
1537 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1538
0bc40be8 1539 return init_workarounds_ring(engine);
9b1136d5
OM
1540}
1541
0bc40be8 1542static int gen9_init_render_ring(struct intel_engine_cs *engine)
82ef822e
DL
1543{
1544 int ret;
1545
0bc40be8 1546 ret = gen8_init_common_ring(engine);
82ef822e
DL
1547 if (ret)
1548 return ret;
1549
0bc40be8 1550 return init_workarounds_ring(engine);
82ef822e
DL
1551}
1552
7a01a0a2
MT
1553static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1554{
1555 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1dae2dfb 1556 struct intel_ringbuffer *ring = req->ring;
4a570db5 1557 struct intel_engine_cs *engine = req->engine;
7a01a0a2
MT
1558 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
1559 int i, ret;
1560
987046ad 1561 ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
7a01a0a2
MT
1562 if (ret)
1563 return ret;
1564
b5321f30 1565 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
7a01a0a2
MT
1566 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
1567 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1568
b5321f30
CW
1569 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
1570 intel_ring_emit(ring, upper_32_bits(pd_daddr));
1571 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
1572 intel_ring_emit(ring, lower_32_bits(pd_daddr));
7a01a0a2
MT
1573 }
1574
b5321f30
CW
1575 intel_ring_emit(ring, MI_NOOP);
1576 intel_ring_advance(ring);
7a01a0a2
MT
1577
1578 return 0;
1579}
1580
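/*
 * Editor's note: with GEN8_LEGACY_PDPES == 4, the sequence emitted above is
 * one MI_LOAD_REGISTER_IMM(8) writing PDP3_UDW/PDP3_LDW down to
 * PDP0_UDW/PDP0_LDW in descending order, followed by a padding MI_NOOP.
 */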
be795fc1 1581static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
8e004efc 1582 u64 offset, unsigned dispatch_flags)
15648585 1583{
1dae2dfb 1584 struct intel_ringbuffer *ring = req->ring;
8e004efc 1585 bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
15648585
OM
1586 int ret;
1587
7a01a0a2
MT
1588 /* Don't rely on hw updating PDPs, especially in lite-restore.
1589 * Ideally, we should set Force PD Restore in ctx descriptor,
1590 * but we can't. Force Restore would be a second option, but
1591 * it is unsafe in case of lite-restore (because the ctx is
2dba3239
MT
1592 * not idle). PML4 is allocated during ppgtt init so this is
1593 * not needed in 48-bit. */
7a01a0a2 1594 if (req->ctx->ppgtt &&
666796da 1595 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
331f38e7 1596 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
c033666a 1597 !intel_vgpu_active(req->i915)) {
2dba3239
MT
1598 ret = intel_logical_ring_emit_pdps(req);
1599 if (ret)
1600 return ret;
1601 }
7a01a0a2 1602
666796da 1603 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
7a01a0a2
MT
1604 }
1605
987046ad 1606 ret = intel_ring_begin(req, 4);
15648585
OM
1607 if (ret)
1608 return ret;
1609
1610 /* FIXME(BDW): Address space and security selectors. */
b5321f30
CW
1611 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
1612 (ppgtt<<8) |
1613 (dispatch_flags & I915_DISPATCH_RS ?
1614 MI_BATCH_RESOURCE_STREAMER : 0));
1615 intel_ring_emit(ring, lower_32_bits(offset));
1616 intel_ring_emit(ring, upper_32_bits(offset));
1617 intel_ring_emit(ring, MI_NOOP);
1618 intel_ring_advance(ring);
15648585
OM
1619
1620 return 0;
1621}
1622
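/*
 * Editor's note: in the MI_BATCH_BUFFER_START_GEN8 dword emitted above,
 * bit 8 ("ppgtt << 8") selects the PPGTT address space, so secure batches
 * (I915_DISPATCH_SECURE) execute from the global GTT instead, and
 * I915_DISPATCH_RS additionally enables the resource streamer.
 */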
31bb59cc 1623static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
73d477f6 1624{
c033666a 1625 struct drm_i915_private *dev_priv = engine->i915;
31bb59cc
CW
1626 I915_WRITE_IMR(engine,
1627 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1628 POSTING_READ_FW(RING_IMR(engine->mmio_base));
73d477f6
OM
1629}
1630
31bb59cc 1631static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
73d477f6 1632{
c033666a 1633 struct drm_i915_private *dev_priv = engine->i915;
31bb59cc 1634 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
73d477f6
OM
1635}
1636
7deb4d39 1637static int gen8_emit_flush(struct drm_i915_gem_request *request,
4712274c
OM
1638 u32 invalidate_domains,
1639 u32 unused)
1640{
1dae2dfb 1641 struct intel_ringbuffer *ring = request->ring;
4712274c
OM
1642 uint32_t cmd;
1643 int ret;
1644
987046ad 1645 ret = intel_ring_begin(request, 4);
4712274c
OM
1646 if (ret)
1647 return ret;
1648
1649 cmd = MI_FLUSH_DW + 1;
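	/*
	 * Editor's note: the "+ 1" extends the command length field to
	 * cover the extra dword of the 64-bit flush address emitted below.
	 */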
1650
f0a1fb10
CW
1651 /* We always require a command barrier so that subsequent
1652 * commands, such as breadcrumb interrupts, are strictly ordered
1653 * wrt the contents of the write cache being flushed to memory
1654 * (and thus being coherent from the CPU).
1655 */
1656 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1657
1658 if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
1659 cmd |= MI_INVALIDATE_TLB;
1dae2dfb 1660 if (request->engine->id == VCS)
f0a1fb10 1661 cmd |= MI_INVALIDATE_BSD;
4712274c
OM
1662 }
1663
b5321f30
CW
1664 intel_ring_emit(ring, cmd);
1665 intel_ring_emit(ring,
1666 I915_GEM_HWS_SCRATCH_ADDR |
1667 MI_FLUSH_DW_USE_GTT);
1668 intel_ring_emit(ring, 0); /* upper addr */
1669 intel_ring_emit(ring, 0); /* value */
1670 intel_ring_advance(ring);
4712274c
OM
1671
1672 return 0;
1673}
1674
7deb4d39 1675static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
4712274c
OM
1676 u32 invalidate_domains,
1677 u32 flush_domains)
1678{
1dae2dfb 1679 struct intel_ringbuffer *ring = request->ring;
b5321f30 1680 struct intel_engine_cs *engine = request->engine;
e2f80391 1681 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
0b2d0934 1682 bool vf_flush_wa = false, dc_flush_wa = false;
4712274c
OM
1683 u32 flags = 0;
1684 int ret;
0b2d0934 1685 int len;
4712274c
OM
1686
1687 flags |= PIPE_CONTROL_CS_STALL;
1688
1689 if (flush_domains) {
1690 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1691 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
965fd602 1692 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
40a24488 1693 flags |= PIPE_CONTROL_FLUSH_ENABLE;
4712274c
OM
1694 }
1695
1696 if (invalidate_domains) {
1697 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1698 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1699 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1700 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1701 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1702 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1703 flags |= PIPE_CONTROL_QW_WRITE;
1704 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
4712274c 1705
1a5a9ce7
BW
1706 /*
1707 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1708 * pipe control.
1709 */
c033666a 1710 if (IS_GEN9(request->i915))
1a5a9ce7 1711 vf_flush_wa = true;
0b2d0934
MK
1712
1713 /* WaForGAMHang:kbl */
1714 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1715 dc_flush_wa = true;
1a5a9ce7 1716 }
9647ff36 1717
0b2d0934
MK
1718 len = 6;
1719
1720 if (vf_flush_wa)
1721 len += 6;
1722
1723 if (dc_flush_wa)
1724 len += 12;
1725
1726 ret = intel_ring_begin(request, len);
4712274c
OM
1727 if (ret)
1728 return ret;
1729
9647ff36 1730 if (vf_flush_wa) {
b5321f30
CW
1731 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1732 intel_ring_emit(ring, 0);
1733 intel_ring_emit(ring, 0);
1734 intel_ring_emit(ring, 0);
1735 intel_ring_emit(ring, 0);
1736 intel_ring_emit(ring, 0);
9647ff36
ID
1737 }
1738
0b2d0934 1739 if (dc_flush_wa) {
b5321f30
CW
1740 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1741 intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
1742 intel_ring_emit(ring, 0);
1743 intel_ring_emit(ring, 0);
1744 intel_ring_emit(ring, 0);
1745 intel_ring_emit(ring, 0);
0b2d0934
MK
1746 }
1747
b5321f30
CW
1748 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1749 intel_ring_emit(ring, flags);
1750 intel_ring_emit(ring, scratch_addr);
1751 intel_ring_emit(ring, 0);
1752 intel_ring_emit(ring, 0);
1753 intel_ring_emit(ring, 0);
0b2d0934
MK
1754
1755 if (dc_flush_wa) {
b5321f30
CW
1756 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1757 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
1758 intel_ring_emit(ring, 0);
1759 intel_ring_emit(ring, 0);
1760 intel_ring_emit(ring, 0);
1761 intel_ring_emit(ring, 0);
0b2d0934
MK
1762 }
1763
b5321f30 1764 intel_ring_advance(ring);
4712274c
OM
1765
1766 return 0;
1767}
1768
c04e0f3b 1769static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
319404df 1770{
319404df
ID
1771 /*
1772 * On BXT A steppings there is a HW coherency issue whereby the
1773 * MI_STORE_DATA_IMM storing the completed request's seqno
1774 * occasionally doesn't invalidate the CPU cache. Work around this by
1775 * clflushing the corresponding cacheline whenever the caller wants
1776 * the coherency to be guaranteed. Note that this cacheline is known
1777 * to be clean at this point, since we only write it in
1778 * bxt_a_set_seqno(), where we also do a clflush after the write. So
1779 * this clflush in practice becomes an invalidate operation.
1780 */
c04e0f3b 1781 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
319404df
ID
1782}
1783
7c17d377
CW
1784/*
1785 * Reserve space for 2 NOOPs at the end of each request to be
1786 * used as a workaround for not being allowed to do lite
1787 * restore with HEAD==TAIL (WaIdleLiteRestore).
1788 */
1789#define WA_TAIL_DWORDS 2
1790
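/*
 * Editor's note: both emit_request paths below reserve this headroom up
 * front (e.g. intel_ring_begin(request, 6 + WA_TAIL_DWORDS)); the padding
 * NOOPs themselves are appended when the request is finally advanced and
 * submitted via intel_logical_ring_advance_and_submit().
 */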
c4e76638 1791static int gen8_emit_request(struct drm_i915_gem_request *request)
4da46e1e 1792{
1dae2dfb 1793 struct intel_ringbuffer *ring = request->ring;
4da46e1e
OM
1794 int ret;
1795
987046ad 1796 ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
4da46e1e
OM
1797 if (ret)
1798 return ret;
1799
7c17d377
CW
1800 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1801 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
4da46e1e 1802
b5321f30
CW
1803 intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1804 intel_ring_emit(ring,
1805 intel_hws_seqno_address(request->engine) |
1806 MI_FLUSH_DW_USE_GTT);
1807 intel_ring_emit(ring, 0);
1808 intel_ring_emit(ring, request->fence.seqno);
1809 intel_ring_emit(ring, MI_USER_INTERRUPT);
1810 intel_ring_emit(ring, MI_NOOP);
7c17d377
CW
1811 return intel_logical_ring_advance_and_submit(request);
1812}
4da46e1e 1813
7c17d377
CW
1814static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1815{
1dae2dfb 1816 struct intel_ringbuffer *ring = request->ring;
7c17d377 1817 int ret;
53292cdb 1818
987046ad 1819 ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
7c17d377
CW
1820 if (ret)
1821 return ret;
1822
ce81a65c
MW
1823 /* We're using qword write, seqno should be aligned to 8 bytes. */
1824 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1825
7c17d377
CW
1826 /* w/a: for post sync ops following a GPGPU operation we
1827 * need a prior CS_STALL, which is emitted by the flush
1828 * following the batch.
1829 */
b5321f30
CW
1830 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
1831 intel_ring_emit(ring,
1832 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1833 PIPE_CONTROL_CS_STALL |
1834 PIPE_CONTROL_QW_WRITE));
1835 intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
1836 intel_ring_emit(ring, 0);
1837 intel_ring_emit(ring, i915_gem_request_get_seqno(request));
ce81a65c 1838 /* We're thrashing one dword of HWS. */
b5321f30
CW
1839 intel_ring_emit(ring, 0);
1840 intel_ring_emit(ring, MI_USER_INTERRUPT);
1841 intel_ring_emit(ring, MI_NOOP);
7c17d377 1842 return intel_logical_ring_advance_and_submit(request);
4da46e1e
OM
1843}
1844
be01363f 1845static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
cef437ad 1846{
cef437ad 1847 struct render_state so;
cef437ad
DL
1848 int ret;
1849
4a570db5 1850 ret = i915_gem_render_state_prepare(req->engine, &so);
cef437ad
DL
1851 if (ret)
1852 return ret;
1853
1854 if (so.rodata == NULL)
1855 return 0;
1856
4a570db5 1857 ret = req->engine->emit_bb_start(req, so.ggtt_offset,
be01363f 1858 I915_DISPATCH_SECURE);
cef437ad
DL
1859 if (ret)
1860 goto out;
1861
4a570db5 1862 ret = req->engine->emit_bb_start(req,
84e81020
AS
1863 (so.ggtt_offset + so.aux_batch_offset),
1864 I915_DISPATCH_SECURE);
1865 if (ret)
1866 goto out;
1867
b2af0376 1868 i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
cef437ad 1869
cef437ad
DL
1870out:
1871 i915_gem_render_state_fini(&so);
1872 return ret;
1873}
1874
8753181e 1875static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
e7778be1
TD
1876{
1877 int ret;
1878
e2be4faf 1879 ret = intel_logical_ring_workarounds_emit(req);
e7778be1
TD
1880 if (ret)
1881 return ret;
1882
3bbaba0c
PA
1883 ret = intel_rcs_context_init_mocs(req);
1884 /*
1885 * Failing to program the MOCS is non-fatal. The system will not
1886 * run at peak performance. So generate an error and carry on.
1887 */
1888 if (ret)
1889 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1890
be01363f 1891 return intel_lr_context_render_state_init(req);
e7778be1
TD
1892}
1893
73e4d07f
OM
1894/**
1895 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
14bb2c11 1896 * @engine: Engine Command Streamer.
73e4d07f 1897 */
0bc40be8 1898void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
454afebd 1899{
6402c330 1900 struct drm_i915_private *dev_priv;
9832b9da 1901
117897f4 1902 if (!intel_engine_initialized(engine))
48d82387
OM
1903 return;
1904
27af5eea
TU
1905 /*
1906 * Tasklet cannot be active at this point due to intel_mark_active/idle,
1907 * so this is just for documentation.
1908 */
1909 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1910 tasklet_kill(&engine->irq_tasklet);
1911
c033666a 1912 dev_priv = engine->i915;
6402c330 1913
0bc40be8
TU
1914 if (engine->buffer) {
1915 intel_logical_ring_stop(engine);
1916 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
b0366a54 1917 }
48d82387 1918
0bc40be8
TU
1919 if (engine->cleanup)
1920 engine->cleanup(engine);
48d82387 1921
33a051a5 1922 intel_engine_cleanup_cmd_parser(engine);
0bc40be8 1923 i915_gem_batch_pool_fini(&engine->batch_pool);
48d82387 1924
688e6c72
CW
1925 intel_engine_fini_breadcrumbs(engine);
1926
0bc40be8 1927 if (engine->status_page.obj) {
7d774cac 1928 i915_gem_object_unpin_map(engine->status_page.obj);
0bc40be8 1929 engine->status_page.obj = NULL;
48d82387 1930 }
24f1d3cc 1931 intel_lr_context_unpin(dev_priv->kernel_context, engine);
17ee950d 1932
0bc40be8
TU
1933 engine->idle_lite_restore_wa = 0;
1934 engine->disable_lite_restore_wa = false;
1935 engine->ctx_desc_template = 0;
ca82580c 1936
0bc40be8 1937 lrc_destroy_wa_ctx_obj(engine);
c033666a 1938 engine->i915 = NULL;
454afebd
OM
1939}
1940
c9cacf93 1941static void
e1382efb 1942logical_ring_default_vfuncs(struct intel_engine_cs *engine)
c9cacf93
TU
1943{
1944 /* Default vfuncs which can be overridden by each engine. */
0bc40be8
TU
1945 engine->init_hw = gen8_init_common_ring;
1946 engine->emit_request = gen8_emit_request;
1947 engine->emit_flush = gen8_emit_flush;
31bb59cc
CW
1948 engine->irq_enable = gen8_logical_ring_enable_irq;
1949 engine->irq_disable = gen8_logical_ring_disable_irq;
0bc40be8 1950 engine->emit_bb_start = gen8_emit_bb_start;
1b7744e7 1951 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
c04e0f3b 1952 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
c9cacf93
TU
1953}
1954
d9f3af96 1955static inline void
c2c7f240 1956logical_ring_default_irqs(struct intel_engine_cs *engine)
d9f3af96 1957{
c2c7f240 1958 unsigned shift = engine->irq_shift;
0bc40be8
TU
1959 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1960 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
d9f3af96
TU
1961}
1962
7d774cac 1963static int
04794adb
TU
1964lrc_setup_hws(struct intel_engine_cs *engine,
1965 struct drm_i915_gem_object *dctx_obj)
1966{
7d774cac 1967 void *hws;
04794adb
TU
1968
1969 /* The HWSP is part of the default context object in LRC mode. */
1970 engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
1971 LRC_PPHWSP_PN * PAGE_SIZE;
7d774cac
TU
1972 hws = i915_gem_object_pin_map(dctx_obj);
1973 if (IS_ERR(hws))
1974 return PTR_ERR(hws);
1975 engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
04794adb 1976 engine->status_page.obj = dctx_obj;
7d774cac
TU
1977
1978 return 0;
04794adb
TU
1979}
1980
bb45438f
TU
1981static void
1982logical_ring_setup(struct intel_engine_cs *engine)
1983{
1984 struct drm_i915_private *dev_priv = engine->i915;
1985 enum forcewake_domains fw_domains;
1986
019bf277
TU
1987 intel_engine_setup_common(engine);
1988
bb45438f
TU
1989 /* Intentionally left blank. */
1990 engine->buffer = NULL;
1991
1992 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1993 RING_ELSP(engine),
1994 FW_REG_WRITE);
1995
1996 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1997 RING_CONTEXT_STATUS_PTR(engine),
1998 FW_REG_READ | FW_REG_WRITE);
1999
2000 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2001 RING_CONTEXT_STATUS_BUF_BASE(engine),
2002 FW_REG_READ);
2003
2004 engine->fw_domains = fw_domains;
2005
bb45438f
TU
2006 tasklet_init(&engine->irq_tasklet,
2007 intel_lrc_irq_handler, (unsigned long)engine);
2008
2009 logical_ring_init_platform_invariants(engine);
2010 logical_ring_default_vfuncs(engine);
2011 logical_ring_default_irqs(engine);
bb45438f
TU
2012}
2013
a19d6ff2
TU
2014static int
2015logical_ring_init(struct intel_engine_cs *engine)
2016{
2017 struct i915_gem_context *dctx = engine->i915->kernel_context;
2018 int ret;
2019
019bf277 2020 ret = intel_engine_init_common(engine);
a19d6ff2
TU
2021 if (ret)
2022 goto error;
2023
2024 ret = execlists_context_deferred_alloc(dctx, engine);
2025 if (ret)
2026 goto error;
2027
2028 /* As this is the default context, always pin it */
2029 ret = intel_lr_context_pin(dctx, engine);
2030 if (ret) {
2031 DRM_ERROR("Failed to pin context for %s: %d\n",
2032 engine->name, ret);
2033 goto error;
2034 }
2035
2036 /* And setup the hardware status page. */
2037 ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
2038 if (ret) {
2039 DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
2040 goto error;
2041 }
2042
2043 return 0;
2044
2045error:
2046 intel_logical_ring_cleanup(engine);
2047 return ret;
2048}
2049
88d2ba2e 2050int logical_render_ring_init(struct intel_engine_cs *engine)
a19d6ff2
TU
2051{
2052 struct drm_i915_private *dev_priv = engine->i915;
2053 int ret;
2054
bb45438f
TU
2055 logical_ring_setup(engine);
2056
a19d6ff2
TU
2057 if (HAS_L3_DPF(dev_priv))
2058 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2059
2060 /* Override some for render ring. */
2061 if (INTEL_GEN(dev_priv) >= 9)
2062 engine->init_hw = gen9_init_render_ring;
2063 else
2064 engine->init_hw = gen8_init_render_ring;
2065 engine->init_context = gen8_init_rcs_context;
2066 engine->cleanup = intel_fini_pipe_control;
2067 engine->emit_flush = gen8_emit_flush_render;
2068 engine->emit_request = gen8_emit_request_render;
2069
7d5ea807 2070 ret = intel_init_pipe_control(engine, 4096);
a19d6ff2
TU
2071 if (ret)
2072 return ret;
2073
2074 ret = intel_init_workaround_bb(engine);
2075 if (ret) {
2076 /*
2077 * We continue even if we fail to initialize the WA batch
2078 * because we only expect rare glitches, nothing critical
2079 * enough to prevent us from using the GPU.
2080 */
2081 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2082 ret);
2083 }
2084
2085 ret = logical_ring_init(engine);
2086 if (ret) {
2087 lrc_destroy_wa_ctx_obj(engine);
2088 }
2089
2090 return ret;
2091}
2092
88d2ba2e 2093int logical_xcs_ring_init(struct intel_engine_cs *engine)
bb45438f
TU
2094{
2095 logical_ring_setup(engine);
2096
2097 return logical_ring_init(engine);
454afebd
OM
2098}
2099
0cea6502 2100static u32
c033666a 2101make_rpcs(struct drm_i915_private *dev_priv)
0cea6502
JM
2102{
2103 u32 rpcs = 0;
2104
2105 /*
2106 * No explicit RPCS request is needed to ensure full
2107 * slice/subslice/EU enablement prior to Gen9.
2108 */
c033666a 2109 if (INTEL_GEN(dev_priv) < 9)
0cea6502
JM
2110 return 0;
2111
2112 /*
2113 * Starting in Gen9, render power gating can leave
2114 * slice/subslice/EU in a partially enabled state. We
2115 * must make an explicit request through RPCS for full
2116 * enablement.
2117 */
c033666a 2118 if (INTEL_INFO(dev_priv)->has_slice_pg) {
0cea6502 2119 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
c033666a 2120 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
0cea6502
JM
2121 GEN8_RPCS_S_CNT_SHIFT;
2122 rpcs |= GEN8_RPCS_ENABLE;
2123 }
2124
c033666a 2125 if (INTEL_INFO(dev_priv)->has_subslice_pg) {
0cea6502 2126 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
c033666a 2127 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
0cea6502
JM
2128 GEN8_RPCS_SS_CNT_SHIFT;
2129 rpcs |= GEN8_RPCS_ENABLE;
2130 }
2131
c033666a
CW
2132 if (INTEL_INFO(dev_priv)->has_eu_pg) {
2133 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
0cea6502 2134 GEN8_RPCS_EU_MIN_SHIFT;
c033666a 2135 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
0cea6502
JM
2136 GEN8_RPCS_EU_MAX_SHIFT;
2137 rpcs |= GEN8_RPCS_ENABLE;
2138 }
2139
2140 return rpcs;
2141}
2142
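/*
 * Editor's note: illustrative example only. For a hypothetical Gen9 part
 * with slice_total = 2, subslice_per_slice = 3 and eu_per_subslice = 8,
 * and all three power-gating flags set, make_rpcs() above returns:
 *
 *	GEN8_RPCS_ENABLE |
 *	GEN8_RPCS_S_CNT_ENABLE  | (2 << GEN8_RPCS_S_CNT_SHIFT) |
 *	GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT) |
 *	(8 << GEN8_RPCS_EU_MIN_SHIFT) | (8 << GEN8_RPCS_EU_MAX_SHIFT)
 */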
0bc40be8 2143static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
71562919
MT
2144{
2145 u32 indirect_ctx_offset;
2146
c033666a 2147 switch (INTEL_GEN(engine->i915)) {
71562919 2148 default:
c033666a 2149 MISSING_CASE(INTEL_GEN(engine->i915));
71562919
MT
2150 /* fall through */
2151 case 9:
2152 indirect_ctx_offset =
2153 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2154 break;
2155 case 8:
2156 indirect_ctx_offset =
2157 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2158 break;
2159 }
2160
2161 return indirect_ctx_offset;
2162}
2163
8670d6f9 2164static int
e2efd130 2165populate_lr_context(struct i915_gem_context *ctx,
7d774cac 2166 struct drm_i915_gem_object *ctx_obj,
0bc40be8
TU
2167 struct intel_engine_cs *engine,
2168 struct intel_ringbuffer *ringbuf)
8670d6f9 2169{
c033666a 2170 struct drm_i915_private *dev_priv = ctx->i915;
ae6c4806 2171 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
7d774cac
TU
2172 void *vaddr;
2173 u32 *reg_state;
8670d6f9
OM
2174 int ret;
2175
2d965536
TD
2176 if (!ppgtt)
2177 ppgtt = dev_priv->mm.aliasing_ppgtt;
2178
8670d6f9
OM
2179 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2180 if (ret) {
2181 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2182 return ret;
2183 }
2184
7d774cac
TU
2185 vaddr = i915_gem_object_pin_map(ctx_obj);
2186 if (IS_ERR(vaddr)) {
2187 ret = PTR_ERR(vaddr);
2188 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
8670d6f9
OM
2189 return ret;
2190 }
7d774cac 2191 ctx_obj->dirty = true;
8670d6f9
OM
2192
2193 /* The second page of the context object contains some fields which must
2194 * be set up prior to the first execution. */
7d774cac 2195 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
8670d6f9
OM
2196
2197 /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
2198 * commands followed by (reg, value) pairs. The values we are setting here are
2199 * only for the first context restore: on a subsequent save, the GPU will
2200 * recreate this batchbuffer with new values (including all the missing
2201 * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
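	/*
	 * Editor's note: sketch of the image laid out below, derived from
	 * the ASSIGN_CTX_REG() calls that follow:
	 *
	 *	MI_LOAD_REGISTER_IMM(14 or 11)	; 14 registers on RCS, 11 otherwise
	 *	  (CONTEXT_CONTROL, ...) (RING_HEAD, 0) (RING_TAIL, 0) ...
	 *	MI_LOAD_REGISTER_IMM(9)
	 *	  (CTX_TIMESTAMP, 0) (PDP3_UDW, ...) ... (PDP0_LDW, ...)
	 *	MI_LOAD_REGISTER_IMM(1)		; RCS only
	 *	  (R_PWR_CLK_STATE, make_rpcs())
	 */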
0d925ea0 2202 reg_state[CTX_LRI_HEADER_0] =
0bc40be8
TU
2203 MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
2204 ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
2205 RING_CONTEXT_CONTROL(engine),
0d925ea0
VS
2206 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2207 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
c033666a 2208 (HAS_RESOURCE_STREAMER(dev_priv) ?
99cf8ea1 2209 CTX_CTRL_RS_CTX_ENABLE : 0)));
0bc40be8
TU
2210 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2211 0);
2212 ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
2213 0);
7ba717cf
TD
2214 /* Ring buffer start address is not known until the buffer is pinned.
2215 * It is written to the context image in execlists_update_context().
2216 */
0bc40be8
TU
2217 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
2218 RING_START(engine->mmio_base), 0);
2219 ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
2220 RING_CTL(engine->mmio_base),
0d925ea0 2221 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
0bc40be8
TU
2222 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
2223 RING_BBADDR_UDW(engine->mmio_base), 0);
2224 ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
2225 RING_BBADDR(engine->mmio_base), 0);
2226 ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
2227 RING_BBSTATE(engine->mmio_base),
0d925ea0 2228 RING_BB_PPGTT);
0bc40be8
TU
2229 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
2230 RING_SBBADDR_UDW(engine->mmio_base), 0);
2231 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
2232 RING_SBBADDR(engine->mmio_base), 0);
2233 ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
2234 RING_SBBSTATE(engine->mmio_base), 0);
2235 if (engine->id == RCS) {
2236 ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
2237 RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
2238 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
2239 RING_INDIRECT_CTX(engine->mmio_base), 0);
2240 ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
2241 RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
2242 if (engine->wa_ctx.obj) {
2243 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
17ee950d
AS
2244 uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
2245
2246 reg_state[CTX_RCS_INDIRECT_CTX+1] =
2247 (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
2248 (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
2249
2250 reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
0bc40be8 2251 intel_lr_indirect_ctx_offset(engine) << 6;
17ee950d
AS
2252
2253 reg_state[CTX_BB_PER_CTX_PTR+1] =
2254 (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
2255 0x01;
2256 }
8670d6f9 2257 }
0d925ea0 2258 reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
0bc40be8
TU
2259 ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
2260 RING_CTX_TIMESTAMP(engine->mmio_base), 0);
0d925ea0 2261 /* PDP values will be assigned later if needed */
0bc40be8
TU
2262 ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
2263 0);
2264 ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
2265 0);
2266 ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
2267 0);
2268 ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
2269 0);
2270 ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
2271 0);
2272 ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
2273 0);
2274 ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
2275 0);
2276 ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
2277 0);
d7b2633d 2278
2dba3239
MT
2279 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
2280 /* 64b PPGTT (48bit canonical)
2281 * PDP0_DESCRIPTOR contains the base address to PML4 and
2282 * other PDP Descriptors are ignored.
2283 */
2284 ASSIGN_CTX_PML4(ppgtt, reg_state);
2285 } else {
2286 /* 32b PPGTT
2287 * PDP*_DESCRIPTOR contains the base address of space supported.
2288 * With dynamic page allocation, PDPs may not be allocated at
2289 * this point. Point the unallocated PDPs to the scratch page.
2290 */
c6a2ac71 2291 execlists_update_context_pdps(ppgtt, reg_state);
2dba3239
MT
2292 }
2293
0bc40be8 2294 if (engine->id == RCS) {
8670d6f9 2295 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
0d925ea0 2296 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
c033666a 2297 make_rpcs(dev_priv));
8670d6f9
OM
2298 }
2299
7d774cac 2300 i915_gem_object_unpin_map(ctx_obj);
8670d6f9
OM
2301
2302 return 0;
2303}
2304
c5d46ee2
DG
2305/**
2306 * intel_lr_context_size() - return the size of the context for an engine
14bb2c11 2307 * @engine: which engine to find the context size for
c5d46ee2
DG
2308 *
2309 * Each engine may require a different amount of space for a context image,
2310 * so when allocating (or copying) an image, this function can be used to
2311 * find the right size for the specific engine.
2312 *
2313 * Return: size (in bytes) of an engine-specific context image
2314 *
2315 * Note: this size includes the HWSP, which is part of the context image
2316 * in LRC mode, but does not include the "shared data page" used with
2317 * GuC submission. The caller should account for this if using the GuC.
2318 */
0bc40be8 2319uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
8c857917
OM
2320{
2321 int ret = 0;
2322
c033666a 2323 WARN_ON(INTEL_GEN(engine->i915) < 8);
8c857917 2324
0bc40be8 2325 switch (engine->id) {
8c857917 2326 case RCS:
c033666a 2327 if (INTEL_GEN(engine->i915) >= 9)
468c6816
MN
2328 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2329 else
2330 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
8c857917
OM
2331 break;
2332 case VCS:
2333 case BCS:
2334 case VECS:
2335 case VCS2:
2336 ret = GEN8_LR_CONTEXT_OTHER_SIZE;
2337 break;
2338 }
2339
2340 return ret;
ede7d42b
OM
2341}
2342
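/*
 * Editor's note: typical usage, as in execlists_context_deferred_alloc()
 * below: round the engine-specific size up to a page multiple, then add the
 * extra shared data page when the image will also be handed to the GuC:
 *
 *	context_size = round_up(intel_lr_context_size(engine), 4096);
 *	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 */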
e2efd130 2343static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
978f1e09 2344 struct intel_engine_cs *engine)
ede7d42b 2345{
8c857917 2346 struct drm_i915_gem_object *ctx_obj;
9021ad03 2347 struct intel_context *ce = &ctx->engine[engine->id];
8c857917 2348 uint32_t context_size;
84c2377f 2349 struct intel_ringbuffer *ringbuf;
8c857917
OM
2350 int ret;
2351
9021ad03 2352 WARN_ON(ce->state);
ede7d42b 2353
0bc40be8 2354 context_size = round_up(intel_lr_context_size(engine), 4096);
8c857917 2355
d1675198
AD
2356 /* One extra page as the sharing data between driver and GuC */
2357 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2358
91c8a326 2359 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
fe3db79b 2360 if (IS_ERR(ctx_obj)) {
3126a660 2361 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
fe3db79b 2362 return PTR_ERR(ctx_obj);
8c857917
OM
2363 }
2364
bcd794c2 2365 ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
01101fa7
CW
2366 if (IS_ERR(ringbuf)) {
2367 ret = PTR_ERR(ringbuf);
e84fe803 2368 goto error_deref_obj;
8670d6f9
OM
2369 }
2370
0bc40be8 2371 ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
8670d6f9
OM
2372 if (ret) {
2373 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
e84fe803 2374 goto error_ringbuf;
84c2377f
OM
2375 }
2376
9021ad03
CW
2377 ce->ringbuf = ringbuf;
2378 ce->state = ctx_obj;
2379 ce->initialised = engine->init_context == NULL;
ede7d42b
OM
2380
2381 return 0;
8670d6f9 2382
01101fa7
CW
2383error_ringbuf:
2384 intel_ringbuffer_free(ringbuf);
e84fe803 2385error_deref_obj:
f8c417cd 2386 i915_gem_object_put(ctx_obj);
9021ad03
CW
2387 ce->ringbuf = NULL;
2388 ce->state = NULL;
8670d6f9 2389 return ret;
ede7d42b 2390}
3e5b6f05 2391
7d774cac 2392void intel_lr_context_reset(struct drm_i915_private *dev_priv,
e2efd130 2393 struct i915_gem_context *ctx)
3e5b6f05 2394{
e2f80391 2395 struct intel_engine_cs *engine;
3e5b6f05 2396
b4ac5afc 2397 for_each_engine(engine, dev_priv) {
9021ad03
CW
2398 struct intel_context *ce = &ctx->engine[engine->id];
2399 struct drm_i915_gem_object *ctx_obj = ce->state;
7d774cac 2400 void *vaddr;
3e5b6f05 2401 uint32_t *reg_state;
3e5b6f05
TD
2402
2403 if (!ctx_obj)
2404 continue;
2405
7d774cac
TU
2406 vaddr = i915_gem_object_pin_map(ctx_obj);
2407 if (WARN_ON(IS_ERR(vaddr)))
3e5b6f05 2408 continue;
7d774cac
TU
2409
2410 reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
2411 ctx_obj->dirty = true;
3e5b6f05
TD
2412
2413 reg_state[CTX_RING_HEAD+1] = 0;
2414 reg_state[CTX_RING_TAIL+1] = 0;
2415
7d774cac 2416 i915_gem_object_unpin_map(ctx_obj);
3e5b6f05 2417
9021ad03
CW
2418 ce->ringbuf->head = 0;
2419 ce->ringbuf->tail = 0;
3e5b6f05
TD
2420 }
2421}