/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

#define GEN9_MOCS_SIZE		64

/* Raw offset is appended to each line for convenience. */
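/*
 * Each entry below is {ring_id, reg, mask, in_context}: a non-zero mask
 * marks a masked register (the mask is shifted into the upper 16 bits on
 * write), and in_context marks a register that lives in the context state
 * image and is reloaded through LRI for restore-inhibited contexts.
 */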
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

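/*
 * Take a one-time snapshot of the host's MOCS control and L3CC tables with
 * raw (_FW) reads; switch_mocs() below falls back to this copy for whichever
 * side of the switch has no vGPU, i.e. when the host owns the engine.
 */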
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
	i915_reg_t offset;
	u32 regs[] = {
		[RCS0] = 0xc800,
		[VCS0] = 0xc900,
		[VCS1] = 0xca00,
		[BCS0] = 0xcc00,
		[VECS0] = 0xcb00,
	};
	int ring_id, i;

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		if (!HAS_ENGINE(dev_priv, ring_id))
			continue;

		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
	}

	gen9_render_mocs.initialized = true;
}

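/*
 * For a restore-inhibited ("inhibit") context the hardware does not load
 * register state from the context image, so the helpers below emit
 * MI_LOAD_REGISTER_IMM packets that install the vGPU's values for the
 * tracked in-context registers and the MOCS tables directly.
 */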
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id ||
		    !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
				(mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			      *(cs - 2), *(cs - 1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			      *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

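/*
 * Note: each 32-bit LNCFCMOCS register packs two 16-bit L3CC entries, which
 * is why only GEN9_MOCS_SIZE / 2 registers are written below.
 */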
static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			      *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/**
 * intel_vgpu_restore_inhibit_context - restore the tracked mmio for an
 * inhibit context
 * @vgpu: a vGPU
 * @req: the request whose ring the lri commands are emitted into
 *
 * Use the lri command to initialize the mmio which is in the context state
 * image for an inhibit context; it contains the tracked engine mmio,
 * render_mocs and render_mocs_l3cc.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS register in context except render engine */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

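/*
 * For reference, the command stream emitted by the function above has the
 * following overall shape (register offsets and values are per vGPU):
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	MI_NOOP
 *	MI_LOAD_REGISTER_IMM(count) { <reg>, <value> } x count
 *	MI_NOOP
 *	... repeated for the MOCS control and l3cc tables on RCS0 ...
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 *	MI_NOOP
 */
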
/* Invalidate an engine's TLB if the vGPU has flagged a pending invalidation. */
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {
		[RCS0] = 0x4260,
		[VCS0] = 0x4264,
		[VCS1] = 0x4268,
		[BCS0] = 0x426c,
		[VECS0] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * we need to put a forcewake when invalidating RCS TLB caches,
	 * otherwise device can go to RC6 state and interrupt invalidation
	 * process.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;

	u32 regs[] = {
		[RCS0] = 0xc800,
		[VCS0] = 0xc900,
		[VCS1] = 0xca00,
		[BCS0] = 0xcc00,
		[VECS0] = 0xcb00,
	};
	int i;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[ring_id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[ring_id][i];

		if (old_v != new_v)
			I915_WRITE_FW(offset, new_v);

		offset.reg += 4;
	}

	if (ring_id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				I915_WRITE_FW(l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

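/*
 * CONTEXT_CONTROL is a masked register: the upper 16 bits of a write carry
 * the write-enable mask for the lower 16 bits. _MASKED_BIT_ENABLE() sets
 * both halves, so the check above is true only if the restore-inhibit bit
 * was explicitly written as enabled in the context image.
 */
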
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (INTEL_GEN(dev_priv) >= 9)
		switch_mocs(pre, next, ring_id);

	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/*
		 * No need to do save or restore of the mmio which is in
		 * context state image on gen9, it's initialized by lri
		 * command and save or restore with context together.
		 */
		if (IS_GEN(dev_priv, 9) && mmio->in_context)
			continue;

		/* Save the register value of the outgoing owner. */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
						~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else
			old_v = mmio->value = I915_READ_FW(mmio->reg);

		/* Restore the register value of the incoming owner. */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore the mmio which is in context
			 * state image if it's not an inhibit context; it
			 * will be restored with the context itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[ring_id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
							(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		I915_WRITE_FW(mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, ring_id);
}

/**
 * intel_gvt_switch_mmio - switch the mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @ring_id: the engine to switch
 *
 * If pre is NULL, the host owns the engine. If next is NULL, we are
 * switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

	/*
	 * We are using the raw mmio access wrappers to improve the
	 * performance of batch mmio read/write, so we need to handle
	 * forcewake manually.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, ring_id);
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (INTEL_GEN(gvt->dev_priv) >= 9)
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
	else
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
		}
	}
}