2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * Kevin Tian <kevin.tian@intel.com>
26 * Zhiyuan Lv <zhiyuan.lv@intel.com>
29 * Min He <min.he@intel.com>
30 * Ping Gao <ping.a.gao@intel.com>
31 * Tina Zhang <tina.zhang@intel.com>
32 * Yulei Zhang <yulei.zhang@intel.com>
33 * Zhi Wang <zhi.a.wang@intel.com>
37 #include <linux/slab.h>
41 #include "gt/intel_engine_regs.h"
42 #include "gt/intel_gpu_commands.h"
43 #include "gt/intel_gt_regs.h"
44 #include "gt/intel_lrc.h"
45 #include "gt/intel_ring.h"
46 #include "gt/intel_gt_requests.h"
47 #include "gt/shmem_utils.h"
49 #include "i915_pvinfo.h"
52 #include "display/intel_display.h"
53 #include "display/intel_sprite_regs.h"
54 #include "gem/i915_gem_context.h"
55 #include "gem/i915_gem_pm.h"
56 #include "gt/intel_context.h"
58 #define INVALID_OP (~0U)
62 #define OP_LEN_3D_MEDIA 16
63 #define OP_LEN_MFX_VC 16
64 #define OP_LEN_VEBOX 16
66 #define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
76 const struct sub_op_bits
*sub_op
;
79 #define MAX_CMD_BUDGET 0x7fffffff
80 #define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
81 #define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
82 #define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
84 #define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
85 #define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
86 #define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
88 /* Render Command Map */
90 /* MI_* command Opcode (28:23) */
91 #define OP_MI_NOOP 0x0
92 #define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
93 #define OP_MI_USER_INTERRUPT 0x2
94 #define OP_MI_WAIT_FOR_EVENT 0x3
95 #define OP_MI_FLUSH 0x4
96 #define OP_MI_ARB_CHECK 0x5
97 #define OP_MI_RS_CONTROL 0x6 /* HSW+ */
98 #define OP_MI_REPORT_HEAD 0x7
99 #define OP_MI_ARB_ON_OFF 0x8
100 #define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
101 #define OP_MI_BATCH_BUFFER_END 0xA
102 #define OP_MI_SUSPEND_FLUSH 0xB
103 #define OP_MI_PREDICATE 0xC /* IVB+ */
104 #define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
105 #define OP_MI_SET_APPID 0xE /* IVB+ */
106 #define OP_MI_RS_CONTEXT 0xF /* HSW+ */
107 #define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
108 #define OP_MI_DISPLAY_FLIP 0x14
109 #define OP_MI_SEMAPHORE_MBOX 0x16
110 #define OP_MI_SET_CONTEXT 0x18
111 #define OP_MI_MATH 0x1A
112 #define OP_MI_URB_CLEAR 0x19
113 #define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
114 #define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
116 #define OP_MI_STORE_DATA_IMM 0x20
117 #define OP_MI_STORE_DATA_INDEX 0x21
118 #define OP_MI_LOAD_REGISTER_IMM 0x22
119 #define OP_MI_UPDATE_GTT 0x23
120 #define OP_MI_STORE_REGISTER_MEM 0x24
121 #define OP_MI_FLUSH_DW 0x26
122 #define OP_MI_CLFLUSH 0x27
123 #define OP_MI_REPORT_PERF_COUNT 0x28
124 #define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
125 #define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
126 #define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
127 #define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
128 #define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
129 #define OP_MI_2E 0x2E /* BDW+ */
130 #define OP_MI_2F 0x2F /* BDW+ */
131 #define OP_MI_BATCH_BUFFER_START 0x31
133 /* Bit definition for dword 0 */
134 #define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
136 #define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
138 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
139 #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
140 #define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
141 #define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
143 /* 2D command: Opcode (28:22) */
144 #define OP_2D(x) ((2<<7) | x)
146 #define OP_XY_SETUP_BLT OP_2D(0x1)
147 #define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
148 #define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
149 #define OP_XY_PIXEL_BLT OP_2D(0x24)
150 #define OP_XY_SCANLINES_BLT OP_2D(0x25)
151 #define OP_XY_TEXT_BLT OP_2D(0x26)
152 #define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
153 #define OP_XY_COLOR_BLT OP_2D(0x50)
154 #define OP_XY_PAT_BLT OP_2D(0x51)
155 #define OP_XY_MONO_PAT_BLT OP_2D(0x52)
156 #define OP_XY_SRC_COPY_BLT OP_2D(0x53)
157 #define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
158 #define OP_XY_FULL_BLT OP_2D(0x55)
159 #define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
160 #define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
161 #define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
162 #define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
163 #define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
164 #define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
165 #define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
166 #define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
167 #define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
168 #define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
169 #define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
171 /* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
172 #define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
173 ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
175 #define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
177 #define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
178 #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
179 #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
180 #define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03)
182 #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
184 #define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
186 #define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
187 #define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
188 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
189 #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
190 #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
191 #define OP_MEDIA_POOL_STATE OP_3D_MEDIA(0x2, 0x0, 0x5)
193 #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
194 #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
195 #define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
196 #define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
198 #define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
199 #define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
200 #define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
201 #define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
202 #define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
203 #define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
204 #define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
205 #define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
206 #define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
207 #define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
208 #define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
209 #define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
210 #define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
211 #define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
212 #define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
213 #define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
214 #define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
215 #define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
216 #define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
217 #define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
218 #define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
219 #define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
220 #define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
221 #define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
222 #define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
223 #define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
224 #define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
225 #define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
226 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
227 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
228 #define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
229 #define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
230 #define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
231 #define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
232 #define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
233 #define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
234 #define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
235 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
236 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
237 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
238 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
239 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
240 #define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
241 #define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
242 #define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
243 #define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
244 #define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
245 #define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
246 #define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
247 #define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
248 #define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
249 #define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
250 #define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
251 #define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
252 #define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
253 #define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
254 #define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
255 #define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
256 #define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
257 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
258 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
259 #define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
260 #define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
261 #define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
262 #define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
263 #define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
265 #define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
266 #define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
267 #define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
268 #define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
269 #define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
270 #define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
271 #define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
272 #define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
273 #define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
274 #define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
275 #define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
277 #define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
278 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
279 #define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
280 #define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
281 #define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
282 #define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
283 #define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
284 #define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
285 #define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
286 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
287 #define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
288 #define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
289 #define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
290 #define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
291 #define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
292 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
293 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
294 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
295 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
296 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
297 #define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
298 #define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
299 #define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
300 #define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
301 #define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
302 #define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
303 #define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
304 #define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
306 /* VCCP Command Parser */
309 * Below MFX and VBE cmd definition is from vaapi intel driver project (BSD License)
310 * git://anongit.freedesktop.org/vaapi/intel-driver
315 #define OP_MFX(pipeline, op, sub_opa, sub_opb) \
322 #define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
323 #define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
324 #define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
325 #define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
326 #define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
327 #define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
328 #define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
329 #define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
330 #define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
331 #define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
332 #define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
334 #define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
336 #define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
337 #define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
338 #define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
339 #define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
340 #define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
341 #define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
342 #define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
343 #define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
344 #define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
345 #define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
346 #define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
347 #define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
349 #define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
350 #define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
351 #define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
352 #define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
353 #define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
355 #define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
356 #define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
357 #define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
358 #define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
359 #define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
361 #define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
362 #define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
363 #define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
365 #define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
366 #define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
367 #define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
369 #define OP_VEB(pipeline, op, sub_opa, sub_opb) \
376 #define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
377 #define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
378 #define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
380 struct parser_exec_state
;
382 typedef int (*parser_cmd_handler
)(struct parser_exec_state
*s
);
384 #define GVT_CMD_HASH_BITS 7
386 /* which DWords need address fix */
387 #define ADDR_FIX_1(x1) (1 << (x1))
388 #define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
389 #define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
390 #define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
391 #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
393 #define DWORD_FIELD(dword, end, start) \
394 FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
396 #define OP_LENGTH_BIAS 2
397 #define CMD_LEN(value) (value + OP_LENGTH_BIAS)
399 static int gvt_check_valid_cmd_length(int len
, int valid_len
)
401 if (valid_len
!= len
) {
402 gvt_err("len is not valid: len=%u valid_len=%u\n",
413 #define F_LEN_MASK 3U
414 #define F_LEN_CONST 1U
416 /* value is const although LEN maybe variable */
417 #define F_LEN_VAR_FIXED (1<<1)
420 * command has its own ip advance logic
421 * e.g. MI_BATCH_START, MI_BATCH_END
423 #define F_IP_ADVANCE_CUSTOM (1<<2)
426 #define R_RCS BIT(RCS0)
427 #define R_VCS1 BIT(VCS0)
428 #define R_VCS2 BIT(VCS1)
429 #define R_VCS (R_VCS1 | R_VCS2)
430 #define R_BCS BIT(BCS0)
431 #define R_VECS BIT(VECS0)
432 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
433 /* rings that support this cmd: BLT/RCS/VCS/VECS */
434 intel_engine_mask_t rings
;
436 /* devices that support this cmd: SNB/IVB/HSW/... */
439 /* which DWords are address that need fix up.
440 * bit 0 means a 32-bit non address operand in command
441 * bit 1 means address operand, which could be 32-bit
442 * or 64-bit depending on different architectures.(
443 * defined by "gmadr_bytes_in_cmd" in intel_gvt.
444 * No matter the address length, each address only takes
445 * one bit in the bitmap.
449 /* flag == F_LEN_CONST : command length
450 * flag == F_LEN_VAR : length bias bits
451 * Note: length is in DWord
455 parser_cmd_handler handler
;
457 /* valid length in DWord */
462 struct hlist_node hlist
;
463 const struct cmd_info
*info
;
467 RING_BUFFER_INSTRUCTION
,
468 BATCH_BUFFER_INSTRUCTION
,
469 BATCH_BUFFER_2ND_LEVEL
,
478 struct parser_exec_state
{
479 struct intel_vgpu
*vgpu
;
480 const struct intel_engine_cs
*engine
;
484 /* batch buffer address type */
487 /* graphics memory address of ring buffer start */
488 unsigned long ring_start
;
489 unsigned long ring_size
;
490 unsigned long ring_head
;
491 unsigned long ring_tail
;
493 /* instruction graphics memory address */
494 unsigned long ip_gma
;
496 /* mapped va of the instr_gma */
501 /* next instruction when return from batch buffer to ring buffer */
502 unsigned long ret_ip_gma_ring
;
504 /* next instruction when return from 2nd batch buffer to batch buffer */
505 unsigned long ret_ip_gma_bb
;
507 /* batch buffer address type (GTT or PPGTT)
508 * used when ret from 2nd level batch buffer
510 int saved_buf_addr_type
;
514 const struct cmd_info
*info
;
516 struct intel_vgpu_workload
*workload
;
519 #define gmadr_dw_number(s) \
520 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
522 static unsigned long bypass_scan_mask
= 0;
524 /* ring ALL, type = 0 */
525 static const struct sub_op_bits sub_op_mi
[] = {
530 static const struct decode_info decode_info_mi
= {
533 ARRAY_SIZE(sub_op_mi
),
537 /* ring RCS, command type 2 */
538 static const struct sub_op_bits sub_op_2d
[] = {
543 static const struct decode_info decode_info_2d
= {
546 ARRAY_SIZE(sub_op_2d
),
550 /* ring RCS, command type 3 */
551 static const struct sub_op_bits sub_op_3d_media
[] = {
558 static const struct decode_info decode_info_3d_media
= {
561 ARRAY_SIZE(sub_op_3d_media
),
565 /* ring VCS, command type 3 */
566 static const struct sub_op_bits sub_op_mfx_vc
[] = {
574 static const struct decode_info decode_info_mfx_vc
= {
577 ARRAY_SIZE(sub_op_mfx_vc
),
581 /* ring VECS, command type 3 */
582 static const struct sub_op_bits sub_op_vebox
[] = {
590 static const struct decode_info decode_info_vebox
= {
593 ARRAY_SIZE(sub_op_vebox
),
597 static const struct decode_info
*ring_decode_info
[I915_NUM_ENGINES
][8] = {
602 &decode_info_3d_media
,
654 static inline u32
get_opcode(u32 cmd
, const struct intel_engine_cs
*engine
)
656 const struct decode_info
*d_info
;
658 d_info
= ring_decode_info
[engine
->id
][CMD_TYPE(cmd
)];
662 return cmd
>> (32 - d_info
->op_len
);
665 static inline const struct cmd_info
*
666 find_cmd_entry(struct intel_gvt
*gvt
, unsigned int opcode
,
667 const struct intel_engine_cs
*engine
)
671 hash_for_each_possible(gvt
->cmd_table
, e
, hlist
, opcode
) {
672 if (opcode
== e
->info
->opcode
&&
673 e
->info
->rings
& engine
->mask
)
679 static inline const struct cmd_info
*
680 get_cmd_info(struct intel_gvt
*gvt
, u32 cmd
,
681 const struct intel_engine_cs
*engine
)
685 opcode
= get_opcode(cmd
, engine
);
686 if (opcode
== INVALID_OP
)
689 return find_cmd_entry(gvt
, opcode
, engine
);
692 static inline u32
sub_op_val(u32 cmd
, u32 hi
, u32 low
)
694 return (cmd
>> low
) & ((1U << (hi
- low
+ 1)) - 1);
697 static inline void print_opcode(u32 cmd
, const struct intel_engine_cs
*engine
)
699 const struct decode_info
*d_info
;
702 d_info
= ring_decode_info
[engine
->id
][CMD_TYPE(cmd
)];
706 gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
707 cmd
>> (32 - d_info
->op_len
), d_info
->name
);
709 for (i
= 0; i
< d_info
->nr_sub_op
; i
++)
710 pr_err("0x%x ", sub_op_val(cmd
, d_info
->sub_op
[i
].hi
,
711 d_info
->sub_op
[i
].low
));
716 static inline u32
*cmd_ptr(struct parser_exec_state
*s
, int index
)
718 return s
->ip_va
+ (index
<< 2);
721 static inline u32
cmd_val(struct parser_exec_state
*s
, int index
)
723 return *cmd_ptr(s
, index
);
726 static inline bool is_init_ctx(struct parser_exec_state
*s
)
728 return (s
->buf_type
== RING_BUFFER_CTX
&& s
->is_init_ctx
);
731 static void parser_exec_state_dump(struct parser_exec_state
*s
)
736 gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
737 " ring_head(%08lx) ring_tail(%08lx)\n",
738 s
->vgpu
->id
, s
->engine
->name
,
739 s
->ring_start
, s
->ring_start
+ s
->ring_size
,
740 s
->ring_head
, s
->ring_tail
);
742 gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
743 s
->buf_type
== RING_BUFFER_INSTRUCTION
?
744 "RING_BUFFER" : ((s
->buf_type
== RING_BUFFER_CTX
) ?
745 "CTX_BUFFER" : "BATCH_BUFFER"),
746 s
->buf_addr_type
== GTT_BUFFER
?
747 "GTT" : "PPGTT", s
->ip_gma
);
749 if (s
->ip_va
== NULL
) {
750 gvt_dbg_cmd(" ip_va(NULL)");
754 gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
755 s
->ip_va
, cmd_val(s
, 0), cmd_val(s
, 1),
756 cmd_val(s
, 2), cmd_val(s
, 3));
758 print_opcode(cmd_val(s
, 0), s
->engine
);
760 s
->ip_va
= (u32
*)((((u64
)s
->ip_va
) >> 12) << 12);
763 gvt_dbg_cmd("ip_va=%p: ", s
->ip_va
);
764 for (i
= 0; i
< 8; i
++)
765 gvt_dbg_cmd("%08x ", cmd_val(s
, i
));
768 s
->ip_va
+= 8 * sizeof(u32
);
773 static inline void update_ip_va(struct parser_exec_state
*s
)
775 unsigned long len
= 0;
777 if (WARN_ON(s
->ring_head
== s
->ring_tail
))
780 if (s
->buf_type
== RING_BUFFER_INSTRUCTION
||
781 s
->buf_type
== RING_BUFFER_CTX
) {
782 unsigned long ring_top
= s
->ring_start
+ s
->ring_size
;
784 if (s
->ring_head
> s
->ring_tail
) {
785 if (s
->ip_gma
>= s
->ring_head
&& s
->ip_gma
< ring_top
)
786 len
= (s
->ip_gma
- s
->ring_head
);
787 else if (s
->ip_gma
>= s
->ring_start
&&
788 s
->ip_gma
<= s
->ring_tail
)
789 len
= (ring_top
- s
->ring_head
) +
790 (s
->ip_gma
- s
->ring_start
);
792 len
= (s
->ip_gma
- s
->ring_head
);
794 s
->ip_va
= s
->rb_va
+ len
;
795 } else {/* shadow batch buffer */
796 s
->ip_va
= s
->ret_bb_va
;
800 static inline int ip_gma_set(struct parser_exec_state
*s
,
801 unsigned long ip_gma
)
803 WARN_ON(!IS_ALIGNED(ip_gma
, 4));
810 static inline int ip_gma_advance(struct parser_exec_state
*s
,
813 s
->ip_gma
+= (dw_len
<< 2);
815 if (s
->buf_type
== RING_BUFFER_INSTRUCTION
) {
816 if (s
->ip_gma
>= s
->ring_start
+ s
->ring_size
)
817 s
->ip_gma
-= s
->ring_size
;
820 s
->ip_va
+= (dw_len
<< 2);
826 static inline int get_cmd_length(const struct cmd_info
*info
, u32 cmd
)
828 if ((info
->flag
& F_LEN_MASK
) == F_LEN_CONST
)
831 return (cmd
& ((1U << info
->len
) - 1)) + 2;
835 static inline int cmd_length(struct parser_exec_state
*s
)
837 return get_cmd_length(s
->info
, cmd_val(s
, 0));
840 /* do not remove this, some platform may need clflush here */
841 #define patch_value(s, addr, val) do { \
845 static inline bool is_mocs_mmio(unsigned int offset
)
847 return ((offset
>= 0xc800) && (offset
<= 0xcff8)) ||
848 ((offset
>= 0xb020) && (offset
<= 0xb0a0));
851 static int is_cmd_update_pdps(unsigned int offset
,
852 struct parser_exec_state
*s
)
854 u32 base
= s
->workload
->engine
->mmio_base
;
855 return i915_mmio_reg_equal(_MMIO(offset
), GEN8_RING_PDP_UDW(base
, 0));
858 static int cmd_pdp_mmio_update_handler(struct parser_exec_state
*s
,
859 unsigned int offset
, unsigned int index
)
861 struct intel_vgpu
*vgpu
= s
->vgpu
;
862 struct intel_vgpu_mm
*shadow_mm
= s
->workload
->shadow_mm
;
863 struct intel_vgpu_mm
*mm
;
864 u64 pdps
[GEN8_3LVL_PDPES
];
866 if (shadow_mm
->ppgtt_mm
.root_entry_type
==
867 GTT_TYPE_PPGTT_ROOT_L4_ENTRY
) {
868 pdps
[0] = (u64
)cmd_val(s
, 2) << 32;
869 pdps
[0] |= cmd_val(s
, 4);
871 mm
= intel_vgpu_find_ppgtt_mm(vgpu
, pdps
);
873 gvt_vgpu_err("failed to get the 4-level shadow vm\n");
876 intel_vgpu_mm_get(mm
);
877 list_add_tail(&mm
->ppgtt_mm
.link
,
878 &s
->workload
->lri_shadow_mm
);
879 *cmd_ptr(s
, 2) = upper_32_bits(mm
->ppgtt_mm
.shadow_pdps
[0]);
880 *cmd_ptr(s
, 4) = lower_32_bits(mm
->ppgtt_mm
.shadow_pdps
[0]);
882 /* Currently all guests use PML4 table and now can't
883 * have a guest with 3-level table but uses LRI for
884 * PPGTT update. So this is simply un-testable. */
886 gvt_vgpu_err("invalid shared shadow vm type\n");
892 static int cmd_reg_handler(struct parser_exec_state
*s
,
893 unsigned int offset
, unsigned int index
, char *cmd
)
895 struct intel_vgpu
*vgpu
= s
->vgpu
;
896 struct intel_gvt
*gvt
= vgpu
->gvt
;
900 if (offset
+ 4 > gvt
->device_info
.mmio_size
) {
901 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
906 if (is_init_ctx(s
)) {
907 struct intel_gvt_mmio_info
*mmio_info
;
909 intel_gvt_mmio_set_cmd_accessible(gvt
, offset
);
910 mmio_info
= intel_gvt_find_mmio_info(gvt
, offset
);
911 if (mmio_info
&& mmio_info
->write
)
912 intel_gvt_mmio_set_cmd_write_patch(gvt
, offset
);
916 if (!intel_gvt_mmio_is_cmd_accessible(gvt
, offset
)) {
917 gvt_vgpu_err("%s access to non-render register (%x)\n",
922 if (!strncmp(cmd
, "srm", 3) ||
923 !strncmp(cmd
, "lrm", 3)) {
924 if (offset
== i915_mmio_reg_offset(GEN8_L3SQCREG4
) ||
926 (IS_BROADWELL(gvt
->gt
->i915
) &&
927 offset
== i915_mmio_reg_offset(INSTPM
)))
930 gvt_vgpu_err("%s access to register (%x)\n",
936 if (!strncmp(cmd
, "lrr-src", 7) ||
937 !strncmp(cmd
, "lrr-dst", 7)) {
938 if (IS_BROADWELL(gvt
->gt
->i915
) && offset
== 0x215c)
941 gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd
, offset
);
946 if (!strncmp(cmd
, "pipe_ctrl", 9)) {
947 /* TODO: add LRI POST logic here */
951 if (strncmp(cmd
, "lri", 3))
954 /* below are all lri handlers */
955 vreg
= &vgpu_vreg(s
->vgpu
, offset
);
957 if (is_cmd_update_pdps(offset
, s
) &&
958 cmd_pdp_mmio_update_handler(s
, offset
, index
))
961 if (offset
== i915_mmio_reg_offset(DERRMR
) ||
962 offset
== i915_mmio_reg_offset(FORCEWAKE_MT
)) {
963 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
964 patch_value(s
, cmd_ptr(s
, index
), VGT_PVINFO_PAGE
);
967 if (is_mocs_mmio(offset
))
968 *vreg
= cmd_val(s
, index
+ 1);
972 if (intel_gvt_mmio_is_cmd_write_patch(gvt
, offset
)) {
973 u32 cmdval_new
, cmdval
;
974 struct intel_gvt_mmio_info
*mmio_info
;
976 cmdval
= cmd_val(s
, index
+ 1);
978 mmio_info
= intel_gvt_find_mmio_info(gvt
, offset
);
982 u64 ro_mask
= mmio_info
->ro_mask
;
985 if (likely(!ro_mask
))
986 ret
= mmio_info
->write(s
->vgpu
, offset
,
989 gvt_vgpu_err("try to write RO reg %x\n",
997 if (cmdval_new
!= cmdval
)
998 patch_value(s
, cmd_ptr(s
, index
+1), cmdval_new
);
1001 /* only patch cmd. restore vreg value if changed in mmio write handler*/
1005 * In order to let workload with inhibit context to generate
1006 * correct image data into memory, vregs values will be loaded to
1007 * hw via LRIs in the workload with inhibit context. But as
1008 * indirect context is loaded prior to LRIs in workload, we don't
1009 * want reg values specified in indirect context overwritten by
1010 * LRIs in workloads. So, when scanning an indirect context, we
1011 * update reg values in it into vregs, so LRIs in workload with
1012 * inhibit context will restore with correct values
1014 if (GRAPHICS_VER(s
->engine
->i915
) == 9 &&
1015 intel_gvt_mmio_is_sr_in_ctx(gvt
, offset
) &&
1016 !strncmp(cmd
, "lri", 3)) {
1017 intel_gvt_read_gpa(s
->vgpu
,
1018 s
->workload
->ring_context_gpa
+ 12, &ctx_sr_ctl
, 4);
1019 /* check inhibit context */
1020 if (ctx_sr_ctl
& 1) {
1021 u32 data
= cmd_val(s
, index
+ 1);
1023 if (intel_gvt_mmio_has_mode_mask(s
->vgpu
->gvt
, offset
))
1024 intel_vgpu_mask_mmio_write(vgpu
,
1027 vgpu_vreg(vgpu
, offset
) = data
;
1034 #define cmd_reg(s, i) \
1035 (cmd_val(s, i) & GENMASK(22, 2))
1037 #define cmd_reg_inhibit(s, i) \
1038 (cmd_val(s, i) & GENMASK(22, 18))
1040 #define cmd_gma(s, i) \
1041 (cmd_val(s, i) & GENMASK(31, 2))
1043 #define cmd_gma_hi(s, i) \
1044 (cmd_val(s, i) & GENMASK(15, 0))
1046 static int cmd_handler_lri(struct parser_exec_state
*s
)
1049 int cmd_len
= cmd_length(s
);
1051 for (i
= 1; i
< cmd_len
; i
+= 2) {
1052 if (IS_BROADWELL(s
->engine
->i915
) && s
->engine
->id
!= RCS0
) {
1053 if (s
->engine
->id
== BCS0
&&
1054 cmd_reg(s
, i
) == i915_mmio_reg_offset(DERRMR
))
1057 ret
|= cmd_reg_inhibit(s
, i
) ? -EBADRQC
: 0;
1061 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "lri");
1068 static int cmd_handler_lrr(struct parser_exec_state
*s
)
1071 int cmd_len
= cmd_length(s
);
1073 for (i
= 1; i
< cmd_len
; i
+= 2) {
1074 if (IS_BROADWELL(s
->engine
->i915
))
1075 ret
|= ((cmd_reg_inhibit(s
, i
) ||
1076 (cmd_reg_inhibit(s
, i
+ 1)))) ?
1080 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "lrr-src");
1083 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
+ 1), i
, "lrr-dst");
1090 static inline int cmd_address_audit(struct parser_exec_state
*s
,
1091 unsigned long guest_gma
, int op_size
, bool index_mode
);
1093 static int cmd_handler_lrm(struct parser_exec_state
*s
)
1095 struct intel_gvt
*gvt
= s
->vgpu
->gvt
;
1096 int gmadr_bytes
= gvt
->device_info
.gmadr_bytes_in_cmd
;
1099 int cmd_len
= cmd_length(s
);
1101 for (i
= 1; i
< cmd_len
;) {
1102 if (IS_BROADWELL(s
->engine
->i915
))
1103 ret
|= (cmd_reg_inhibit(s
, i
)) ? -EBADRQC
: 0;
1106 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "lrm");
1109 if (cmd_val(s
, 0) & (1 << 22)) {
1110 gma
= cmd_gma(s
, i
+ 1);
1111 if (gmadr_bytes
== 8)
1112 gma
|= (cmd_gma_hi(s
, i
+ 2)) << 32;
1113 ret
|= cmd_address_audit(s
, gma
, sizeof(u32
), false);
1117 i
+= gmadr_dw_number(s
) + 1;
1122 static int cmd_handler_srm(struct parser_exec_state
*s
)
1124 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1127 int cmd_len
= cmd_length(s
);
1129 for (i
= 1; i
< cmd_len
;) {
1130 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "srm");
1133 if (cmd_val(s
, 0) & (1 << 22)) {
1134 gma
= cmd_gma(s
, i
+ 1);
1135 if (gmadr_bytes
== 8)
1136 gma
|= (cmd_gma_hi(s
, i
+ 2)) << 32;
1137 ret
|= cmd_address_audit(s
, gma
, sizeof(u32
), false);
1141 i
+= gmadr_dw_number(s
) + 1;
1146 struct cmd_interrupt_event
{
1147 int pipe_control_notify
;
1149 int mi_user_interrupt
;
1152 static const struct cmd_interrupt_event cmd_interrupt_events
[] = {
1154 .pipe_control_notify
= RCS_PIPE_CONTROL
,
1155 .mi_flush_dw
= INTEL_GVT_EVENT_RESERVED
,
1156 .mi_user_interrupt
= RCS_MI_USER_INTERRUPT
,
1159 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
1160 .mi_flush_dw
= BCS_MI_FLUSH_DW
,
1161 .mi_user_interrupt
= BCS_MI_USER_INTERRUPT
,
1164 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
1165 .mi_flush_dw
= VCS_MI_FLUSH_DW
,
1166 .mi_user_interrupt
= VCS_MI_USER_INTERRUPT
,
1169 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
1170 .mi_flush_dw
= VCS2_MI_FLUSH_DW
,
1171 .mi_user_interrupt
= VCS2_MI_USER_INTERRUPT
,
1174 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
1175 .mi_flush_dw
= VECS_MI_FLUSH_DW
,
1176 .mi_user_interrupt
= VECS_MI_USER_INTERRUPT
,
1180 static int cmd_handler_pipe_control(struct parser_exec_state
*s
)
1182 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1184 bool index_mode
= false;
1185 unsigned int post_sync
;
1189 post_sync
= (cmd_val(s
, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK
) >> 14;
1192 if (cmd_val(s
, 1) & PIPE_CONTROL_MMIO_WRITE
)
1193 ret
= cmd_reg_handler(s
, cmd_reg(s
, 2), 1, "pipe_ctrl");
1195 else if (post_sync
) {
1197 ret
= cmd_reg_handler(s
, 0x2350, 1, "pipe_ctrl");
1198 else if (post_sync
== 3)
1199 ret
= cmd_reg_handler(s
, 0x2358, 1, "pipe_ctrl");
1200 else if (post_sync
== 1) {
1202 if ((cmd_val(s
, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB
)) {
1203 gma
= cmd_val(s
, 2) & GENMASK(31, 3);
1204 if (gmadr_bytes
== 8)
1205 gma
|= (cmd_gma_hi(s
, 3)) << 32;
1206 /* Store Data Index */
1207 if (cmd_val(s
, 1) & (1 << 21))
1209 ret
|= cmd_address_audit(s
, gma
, sizeof(u64
),
1214 hws_pga
= s
->vgpu
->hws_pga
[s
->engine
->id
];
1215 gma
= hws_pga
+ gma
;
1216 patch_value(s
, cmd_ptr(s
, 2), gma
);
1217 val
= cmd_val(s
, 1) & (~(1 << 21));
1218 patch_value(s
, cmd_ptr(s
, 1), val
);
1227 if (cmd_val(s
, 1) & PIPE_CONTROL_NOTIFY
)
1228 set_bit(cmd_interrupt_events
[s
->engine
->id
].pipe_control_notify
,
1229 s
->workload
->pending_events
);
1233 static int cmd_handler_mi_user_interrupt(struct parser_exec_state
*s
)
1235 set_bit(cmd_interrupt_events
[s
->engine
->id
].mi_user_interrupt
,
1236 s
->workload
->pending_events
);
1237 patch_value(s
, cmd_ptr(s
, 0), MI_NOOP
);
1241 static int cmd_advance_default(struct parser_exec_state
*s
)
1243 return ip_gma_advance(s
, cmd_length(s
));
1246 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state
*s
)
1250 if (s
->buf_type
== BATCH_BUFFER_2ND_LEVEL
) {
1251 s
->buf_type
= BATCH_BUFFER_INSTRUCTION
;
1252 ret
= ip_gma_set(s
, s
->ret_ip_gma_bb
);
1253 s
->buf_addr_type
= s
->saved_buf_addr_type
;
1254 } else if (s
->buf_type
== RING_BUFFER_CTX
) {
1255 ret
= ip_gma_set(s
, s
->ring_tail
);
1257 s
->buf_type
= RING_BUFFER_INSTRUCTION
;
1258 s
->buf_addr_type
= GTT_BUFFER
;
1259 if (s
->ret_ip_gma_ring
>= s
->ring_start
+ s
->ring_size
)
1260 s
->ret_ip_gma_ring
-= s
->ring_size
;
1261 ret
= ip_gma_set(s
, s
->ret_ip_gma_ring
);
1266 struct mi_display_flip_command_info
{
1270 i915_reg_t stride_reg
;
1271 i915_reg_t ctrl_reg
;
1272 i915_reg_t surf_reg
;
1279 struct plane_code_mapping
{
1285 static int gen8_decode_mi_display_flip(struct parser_exec_state
*s
,
1286 struct mi_display_flip_command_info
*info
)
1288 struct drm_i915_private
*dev_priv
= s
->engine
->i915
;
1289 struct plane_code_mapping gen8_plane_code
[] = {
1290 [0] = {PIPE_A
, PLANE_A
, PRIMARY_A_FLIP_DONE
},
1291 [1] = {PIPE_B
, PLANE_A
, PRIMARY_B_FLIP_DONE
},
1292 [2] = {PIPE_A
, PLANE_B
, SPRITE_A_FLIP_DONE
},
1293 [3] = {PIPE_B
, PLANE_B
, SPRITE_B_FLIP_DONE
},
1294 [4] = {PIPE_C
, PLANE_A
, PRIMARY_C_FLIP_DONE
},
1295 [5] = {PIPE_C
, PLANE_B
, SPRITE_C_FLIP_DONE
},
1297 u32 dword0
, dword1
, dword2
;
1300 dword0
= cmd_val(s
, 0);
1301 dword1
= cmd_val(s
, 1);
1302 dword2
= cmd_val(s
, 2);
1304 v
= (dword0
& GENMASK(21, 19)) >> 19;
1305 if (drm_WARN_ON(&dev_priv
->drm
, v
>= ARRAY_SIZE(gen8_plane_code
)))
1308 info
->pipe
= gen8_plane_code
[v
].pipe
;
1309 info
->plane
= gen8_plane_code
[v
].plane
;
1310 info
->event
= gen8_plane_code
[v
].event
;
1311 info
->stride_val
= (dword1
& GENMASK(15, 6)) >> 6;
1312 info
->tile_val
= (dword1
& 0x1);
1313 info
->surf_val
= (dword2
& GENMASK(31, 12)) >> 12;
1314 info
->async_flip
= ((dword2
& GENMASK(1, 0)) == 0x1);
1316 if (info
->plane
== PLANE_A
) {
1317 info
->ctrl_reg
= DSPCNTR(info
->pipe
);
1318 info
->stride_reg
= DSPSTRIDE(info
->pipe
);
1319 info
->surf_reg
= DSPSURF(info
->pipe
);
1320 } else if (info
->plane
== PLANE_B
) {
1321 info
->ctrl_reg
= SPRCTL(info
->pipe
);
1322 info
->stride_reg
= SPRSTRIDE(info
->pipe
);
1323 info
->surf_reg
= SPRSURF(info
->pipe
);
1325 drm_WARN_ON(&dev_priv
->drm
, 1);
1331 static int skl_decode_mi_display_flip(struct parser_exec_state
*s
,
1332 struct mi_display_flip_command_info
*info
)
1334 struct drm_i915_private
*dev_priv
= s
->engine
->i915
;
1335 struct intel_vgpu
*vgpu
= s
->vgpu
;
1336 u32 dword0
= cmd_val(s
, 0);
1337 u32 dword1
= cmd_val(s
, 1);
1338 u32 dword2
= cmd_val(s
, 2);
1339 u32 plane
= (dword0
& GENMASK(12, 8)) >> 8;
1341 info
->plane
= PRIMARY_PLANE
;
1344 case MI_DISPLAY_FLIP_SKL_PLANE_1_A
:
1345 info
->pipe
= PIPE_A
;
1346 info
->event
= PRIMARY_A_FLIP_DONE
;
1348 case MI_DISPLAY_FLIP_SKL_PLANE_1_B
:
1349 info
->pipe
= PIPE_B
;
1350 info
->event
= PRIMARY_B_FLIP_DONE
;
1352 case MI_DISPLAY_FLIP_SKL_PLANE_1_C
:
1353 info
->pipe
= PIPE_C
;
1354 info
->event
= PRIMARY_C_FLIP_DONE
;
1357 case MI_DISPLAY_FLIP_SKL_PLANE_2_A
:
1358 info
->pipe
= PIPE_A
;
1359 info
->event
= SPRITE_A_FLIP_DONE
;
1360 info
->plane
= SPRITE_PLANE
;
1362 case MI_DISPLAY_FLIP_SKL_PLANE_2_B
:
1363 info
->pipe
= PIPE_B
;
1364 info
->event
= SPRITE_B_FLIP_DONE
;
1365 info
->plane
= SPRITE_PLANE
;
1367 case MI_DISPLAY_FLIP_SKL_PLANE_2_C
:
1368 info
->pipe
= PIPE_C
;
1369 info
->event
= SPRITE_C_FLIP_DONE
;
1370 info
->plane
= SPRITE_PLANE
;
1374 gvt_vgpu_err("unknown plane code %d\n", plane
);
1378 info
->stride_val
= (dword1
& GENMASK(15, 6)) >> 6;
1379 info
->tile_val
= (dword1
& GENMASK(2, 0));
1380 info
->surf_val
= (dword2
& GENMASK(31, 12)) >> 12;
1381 info
->async_flip
= ((dword2
& GENMASK(1, 0)) == 0x1);
1383 info
->ctrl_reg
= DSPCNTR(info
->pipe
);
1384 info
->stride_reg
= DSPSTRIDE(info
->pipe
);
1385 info
->surf_reg
= DSPSURF(info
->pipe
);
1390 static int gen8_check_mi_display_flip(struct parser_exec_state
*s
,
1391 struct mi_display_flip_command_info
*info
)
1395 if (!info
->async_flip
)
1398 if (GRAPHICS_VER(s
->engine
->i915
) >= 9) {
1399 stride
= vgpu_vreg_t(s
->vgpu
, info
->stride_reg
) & GENMASK(9, 0);
1400 tile
= (vgpu_vreg_t(s
->vgpu
, info
->ctrl_reg
) &
1401 GENMASK(12, 10)) >> 10;
1403 stride
= (vgpu_vreg_t(s
->vgpu
, info
->stride_reg
) &
1404 GENMASK(15, 6)) >> 6;
1405 tile
= (vgpu_vreg_t(s
->vgpu
, info
->ctrl_reg
) & (1 << 10)) >> 10;
1408 if (stride
!= info
->stride_val
)
1409 gvt_dbg_cmd("cannot change stride during async flip\n");
1411 if (tile
!= info
->tile_val
)
1412 gvt_dbg_cmd("cannot change tile during async flip\n");
1417 static int gen8_update_plane_mmio_from_mi_display_flip(
1418 struct parser_exec_state
*s
,
1419 struct mi_display_flip_command_info
*info
)
1421 struct drm_i915_private
*dev_priv
= s
->engine
->i915
;
1422 struct intel_vgpu
*vgpu
= s
->vgpu
;
1424 set_mask_bits(&vgpu_vreg_t(vgpu
, info
->surf_reg
), GENMASK(31, 12),
1425 info
->surf_val
<< 12);
1426 if (GRAPHICS_VER(dev_priv
) >= 9) {
1427 set_mask_bits(&vgpu_vreg_t(vgpu
, info
->stride_reg
), GENMASK(9, 0),
1429 set_mask_bits(&vgpu_vreg_t(vgpu
, info
->ctrl_reg
), GENMASK(12, 10),
1430 info
->tile_val
<< 10);
1432 set_mask_bits(&vgpu_vreg_t(vgpu
, info
->stride_reg
), GENMASK(15, 6),
1433 info
->stride_val
<< 6);
1434 set_mask_bits(&vgpu_vreg_t(vgpu
, info
->ctrl_reg
), GENMASK(10, 10),
1435 info
->tile_val
<< 10);
1438 if (info
->plane
== PLANE_PRIMARY
)
1439 vgpu_vreg_t(vgpu
, PIPE_FLIPCOUNT_G4X(info
->pipe
))++;
1441 if (info
->async_flip
)
1442 intel_vgpu_trigger_virtual_event(vgpu
, info
->event
);
1444 set_bit(info
->event
, vgpu
->irq
.flip_done_event
[info
->pipe
]);
1449 static int decode_mi_display_flip(struct parser_exec_state
*s
,
1450 struct mi_display_flip_command_info
*info
)
1452 if (IS_BROADWELL(s
->engine
->i915
))
1453 return gen8_decode_mi_display_flip(s
, info
);
1454 if (GRAPHICS_VER(s
->engine
->i915
) >= 9)
1455 return skl_decode_mi_display_flip(s
, info
);
1460 static int check_mi_display_flip(struct parser_exec_state
*s
,
1461 struct mi_display_flip_command_info
*info
)
1463 return gen8_check_mi_display_flip(s
, info
);
1466 static int update_plane_mmio_from_mi_display_flip(
1467 struct parser_exec_state
*s
,
1468 struct mi_display_flip_command_info
*info
)
1470 return gen8_update_plane_mmio_from_mi_display_flip(s
, info
);
1473 static int cmd_handler_mi_display_flip(struct parser_exec_state
*s
)
1475 struct mi_display_flip_command_info info
;
1476 struct intel_vgpu
*vgpu
= s
->vgpu
;
1479 int len
= cmd_length(s
);
1480 u32 valid_len
= CMD_LEN(1);
1482 /* Flip Type == Stereo 3D Flip */
1483 if (DWORD_FIELD(2, 1, 0) == 2)
1485 ret
= gvt_check_valid_cmd_length(cmd_length(s
),
1490 ret
= decode_mi_display_flip(s
, &info
);
1492 gvt_vgpu_err("fail to decode MI display flip command\n");
1496 ret
= check_mi_display_flip(s
, &info
);
1498 gvt_vgpu_err("invalid MI display flip command\n");
1502 ret
= update_plane_mmio_from_mi_display_flip(s
, &info
);
1504 gvt_vgpu_err("fail to update plane mmio\n");
1508 for (i
= 0; i
< len
; i
++)
1509 patch_value(s
, cmd_ptr(s
, i
), MI_NOOP
);
1513 static bool is_wait_for_flip_pending(u32 cmd
)
1515 return cmd
& (MI_WAIT_FOR_PLANE_A_FLIP_PENDING
|
1516 MI_WAIT_FOR_PLANE_B_FLIP_PENDING
|
1517 MI_WAIT_FOR_PLANE_C_FLIP_PENDING
|
1518 MI_WAIT_FOR_SPRITE_A_FLIP_PENDING
|
1519 MI_WAIT_FOR_SPRITE_B_FLIP_PENDING
|
1520 MI_WAIT_FOR_SPRITE_C_FLIP_PENDING
);
1523 static int cmd_handler_mi_wait_for_event(struct parser_exec_state
*s
)
1525 u32 cmd
= cmd_val(s
, 0);
1527 if (!is_wait_for_flip_pending(cmd
))
1530 patch_value(s
, cmd_ptr(s
, 0), MI_NOOP
);
1534 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state
*s
, int index
)
1537 unsigned long gma_high
, gma_low
;
1538 struct intel_vgpu
*vgpu
= s
->vgpu
;
1539 int gmadr_bytes
= vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1541 if (WARN_ON(gmadr_bytes
!= 4 && gmadr_bytes
!= 8)) {
1542 gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes
);
1543 return INTEL_GVT_INVALID_ADDR
;
1546 gma_low
= cmd_val(s
, index
) & BATCH_BUFFER_ADDR_MASK
;
1547 if (gmadr_bytes
== 4) {
1550 gma_high
= cmd_val(s
, index
+ 1) & BATCH_BUFFER_ADDR_HIGH_MASK
;
1551 addr
= (((unsigned long)gma_high
) << 32) | gma_low
;
1556 static inline int cmd_address_audit(struct parser_exec_state
*s
,
1557 unsigned long guest_gma
, int op_size
, bool index_mode
)
1559 struct intel_vgpu
*vgpu
= s
->vgpu
;
1560 u32 max_surface_size
= vgpu
->gvt
->device_info
.max_surface_size
;
1564 if (op_size
> max_surface_size
) {
1565 gvt_vgpu_err("command address audit fail name %s\n",
1571 if (guest_gma
>= I915_GTT_PAGE_SIZE
) {
1575 } else if (!intel_gvt_ggtt_validate_range(vgpu
, guest_gma
, op_size
)) {
1583 gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1584 s
->info
->name
, guest_gma
, op_size
);
1586 pr_err("cmd dump: ");
1587 for (i
= 0; i
< cmd_length(s
); i
++) {
1589 pr_err("\n%08x ", cmd_val(s
, i
));
1591 pr_err("%08x ", cmd_val(s
, i
));
1593 pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1595 vgpu_aperture_gmadr_base(vgpu
),
1596 vgpu_aperture_gmadr_end(vgpu
),
1597 vgpu_hidden_gmadr_base(vgpu
),
1598 vgpu_hidden_gmadr_end(vgpu
));
1602 static int cmd_handler_mi_store_data_imm(struct parser_exec_state
*s
)
1604 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1605 int op_size
= (cmd_length(s
) - 3) * sizeof(u32
);
1606 int core_id
= (cmd_val(s
, 2) & (1 << 0)) ? 1 : 0;
1607 unsigned long gma
, gma_low
, gma_high
;
1608 u32 valid_len
= CMD_LEN(2);
1612 if (!(cmd_val(s
, 0) & (1 << 22)))
1615 /* check if QWORD */
1616 if (DWORD_FIELD(0, 21, 21))
1618 ret
= gvt_check_valid_cmd_length(cmd_length(s
),
1623 gma
= cmd_val(s
, 2) & GENMASK(31, 2);
1625 if (gmadr_bytes
== 8) {
1626 gma_low
= cmd_val(s
, 1) & GENMASK(31, 2);
1627 gma_high
= cmd_val(s
, 2) & GENMASK(15, 0);
1628 gma
= (gma_high
<< 32) | gma_low
;
1629 core_id
= (cmd_val(s
, 1) & (1 << 0)) ? 1 : 0;
1631 ret
= cmd_address_audit(s
, gma
+ op_size
* core_id
, op_size
, false);
1635 static inline int unexpected_cmd(struct parser_exec_state
*s
)
1637 struct intel_vgpu
*vgpu
= s
->vgpu
;
1639 gvt_vgpu_err("Unexpected %s in command buffer!\n", s
->info
->name
);
1644 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state
*s
)
1646 return unexpected_cmd(s
);
1649 static int cmd_handler_mi_report_perf_count(struct parser_exec_state
*s
)
1651 return unexpected_cmd(s
);
1654 static int cmd_handler_mi_op_2e(struct parser_exec_state
*s
)
1656 return unexpected_cmd(s
);
1659 static int cmd_handler_mi_op_2f(struct parser_exec_state
*s
)
1661 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1662 int op_size
= (1 << ((cmd_val(s
, 0) & GENMASK(20, 19)) >> 19)) *
1664 unsigned long gma
, gma_high
;
1665 u32 valid_len
= CMD_LEN(1);
1668 if (!(cmd_val(s
, 0) & (1 << 22)))
1671 /* check inline data */
1672 if (cmd_val(s
, 0) & BIT(18))
1673 valid_len
= CMD_LEN(9);
1674 ret
= gvt_check_valid_cmd_length(cmd_length(s
),
1679 gma
= cmd_val(s
, 1) & GENMASK(31, 2);
1680 if (gmadr_bytes
== 8) {
1681 gma_high
= cmd_val(s
, 2) & GENMASK(15, 0);
1682 gma
= (gma_high
<< 32) | gma
;
1684 ret
= cmd_address_audit(s
, gma
, op_size
, false);
1688 static int cmd_handler_mi_store_data_index(struct parser_exec_state
*s
)
1690 return unexpected_cmd(s
);
1693 static int cmd_handler_mi_clflush(struct parser_exec_state
*s
)
1695 return unexpected_cmd(s
);
1698 static int cmd_handler_mi_conditional_batch_buffer_end(
1699 struct parser_exec_state
*s
)
1701 return unexpected_cmd(s
);
1704 static int cmd_handler_mi_update_gtt(struct parser_exec_state
*s
)
1706 return unexpected_cmd(s
);
1709 static int cmd_handler_mi_flush_dw(struct parser_exec_state
*s
)
1711 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1713 bool index_mode
= false;
1716 u32 valid_len
= CMD_LEN(2);
1718 ret
= gvt_check_valid_cmd_length(cmd_length(s
),
1721 /* Check again for Qword */
1722 ret
= gvt_check_valid_cmd_length(cmd_length(s
),
1727 /* Check post-sync and ppgtt bit */
1728 if (((cmd_val(s
, 0) >> 14) & 0x3) && (cmd_val(s
, 1) & (1 << 2))) {
1729 gma
= cmd_val(s
, 1) & GENMASK(31, 3);
1730 if (gmadr_bytes
== 8)
1731 gma
|= (cmd_val(s
, 2) & GENMASK(15, 0)) << 32;
1732 /* Store Data Index */
1733 if (cmd_val(s
, 0) & (1 << 21))
1735 ret
= cmd_address_audit(s
, gma
, sizeof(u64
), index_mode
);
1739 hws_pga
= s
->vgpu
->hws_pga
[s
->engine
->id
];
1740 gma
= hws_pga
+ gma
;
1741 patch_value(s
, cmd_ptr(s
, 1), gma
);
1742 val
= cmd_val(s
, 0) & (~(1 << 21));
1743 patch_value(s
, cmd_ptr(s
, 0), val
);
1746 /* Check notify bit */
1747 if ((cmd_val(s
, 0) & (1 << 8)))
1748 set_bit(cmd_interrupt_events
[s
->engine
->id
].mi_flush_dw
,
1749 s
->workload
->pending_events
);
1753 static void addr_type_update_snb(struct parser_exec_state
*s
)
1755 if ((s
->buf_type
== RING_BUFFER_INSTRUCTION
) &&
1756 (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s
, 0)) == 1)) {
1757 s
->buf_addr_type
= PPGTT_BUFFER
;
1762 static int copy_gma_to_hva(struct intel_vgpu
*vgpu
, struct intel_vgpu_mm
*mm
,
1763 unsigned long gma
, unsigned long end_gma
, void *va
)
1765 unsigned long copy_len
, offset
;
1766 unsigned long len
= 0;
1769 while (gma
!= end_gma
) {
1770 gpa
= intel_vgpu_gma_to_gpa(mm
, gma
);
1771 if (gpa
== INTEL_GVT_INVALID_ADDR
) {
1772 gvt_vgpu_err("invalid gma address: %lx\n", gma
);
1776 offset
= gma
& (I915_GTT_PAGE_SIZE
- 1);
1778 copy_len
= (end_gma
- gma
) >= (I915_GTT_PAGE_SIZE
- offset
) ?
1779 I915_GTT_PAGE_SIZE
- offset
: end_gma
- gma
;
1781 intel_gvt_read_gpa(vgpu
, gpa
, va
+ len
, copy_len
);
1791 * Check whether a batch buffer needs to be scanned. Currently
1792 * the only criteria is based on privilege.
1794 static int batch_buffer_needs_scan(struct parser_exec_state
*s
)
1796 /* Decide privilege based on address space */
1797 if (cmd_val(s
, 0) & BIT(8) &&
1798 !(s
->vgpu
->scan_nonprivbb
& s
->engine
->mask
))
1804 static const char *repr_addr_type(unsigned int type
)
1806 return type
== PPGTT_BUFFER
? "ppgtt" : "ggtt";
1809 static int find_bb_size(struct parser_exec_state
*s
,
1810 unsigned long *bb_size
,
1811 unsigned long *bb_end_cmd_offset
)
1813 unsigned long gma
= 0;
1814 const struct cmd_info
*info
;
1816 bool bb_end
= false;
1817 struct intel_vgpu
*vgpu
= s
->vgpu
;
1819 struct intel_vgpu_mm
*mm
= (s
->buf_addr_type
== GTT_BUFFER
) ?
1820 s
->vgpu
->gtt
.ggtt_mm
: s
->workload
->shadow_mm
;
1823 *bb_end_cmd_offset
= 0;
1825 /* get the start gm address of the batch buffer */
1826 gma
= get_gma_bb_from_cmd(s
, 1);
1827 if (gma
== INTEL_GVT_INVALID_ADDR
)
1830 cmd
= cmd_val(s
, 0);
1831 info
= get_cmd_info(s
->vgpu
->gvt
, cmd
, s
->engine
);
1833 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1834 cmd
, get_opcode(cmd
, s
->engine
),
1835 repr_addr_type(s
->buf_addr_type
),
1836 s
->engine
->name
, s
->workload
);
1840 if (copy_gma_to_hva(s
->vgpu
, mm
,
1841 gma
, gma
+ 4, &cmd
) < 0)
1843 info
= get_cmd_info(s
->vgpu
->gvt
, cmd
, s
->engine
);
1845 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1846 cmd
, get_opcode(cmd
, s
->engine
),
1847 repr_addr_type(s
->buf_addr_type
),
1848 s
->engine
->name
, s
->workload
);
1852 if (info
->opcode
== OP_MI_BATCH_BUFFER_END
) {
1854 } else if (info
->opcode
== OP_MI_BATCH_BUFFER_START
) {
1855 if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd
) == 0)
1856 /* chained batch buffer */
1861 *bb_end_cmd_offset
= *bb_size
;
1863 cmd_len
= get_cmd_length(info
, cmd
) << 2;
1864 *bb_size
+= cmd_len
;
1871 static int audit_bb_end(struct parser_exec_state
*s
, void *va
)
1873 struct intel_vgpu
*vgpu
= s
->vgpu
;
1874 u32 cmd
= *(u32
*)va
;
1875 const struct cmd_info
*info
;
1877 info
= get_cmd_info(s
->vgpu
->gvt
, cmd
, s
->engine
);
1879 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1880 cmd
, get_opcode(cmd
, s
->engine
),
1881 repr_addr_type(s
->buf_addr_type
),
1882 s
->engine
->name
, s
->workload
);
1886 if ((info
->opcode
== OP_MI_BATCH_BUFFER_END
) ||
1887 ((info
->opcode
== OP_MI_BATCH_BUFFER_START
) &&
1888 (BATCH_BUFFER_2ND_LEVEL_BIT(cmd
) == 0)))
1894 static int perform_bb_shadow(struct parser_exec_state
*s
)
1896 struct intel_vgpu
*vgpu
= s
->vgpu
;
1897 struct intel_vgpu_shadow_bb
*bb
;
1898 unsigned long gma
= 0;
1899 unsigned long bb_size
;
1900 unsigned long bb_end_cmd_offset
;
1902 struct intel_vgpu_mm
*mm
= (s
->buf_addr_type
== GTT_BUFFER
) ?
1903 s
->vgpu
->gtt
.ggtt_mm
: s
->workload
->shadow_mm
;
1904 unsigned long start_offset
= 0;
1906 /* get the start gm address of the batch buffer */
1907 gma
= get_gma_bb_from_cmd(s
, 1);
1908 if (gma
== INTEL_GVT_INVALID_ADDR
)
1911 ret
= find_bb_size(s
, &bb_size
, &bb_end_cmd_offset
);
1915 bb
= kzalloc(sizeof(*bb
), GFP_KERNEL
);
1919 bb
->ppgtt
= (s
->buf_addr_type
== GTT_BUFFER
) ? false : true;
1921 /* the start_offset stores the batch buffer's start gma's
1922 * offset relative to page boundary. so for non-privileged batch
1923 * buffer, the shadowed gem object holds exactly the same page
1924 * layout as original gem object. This is for the convience of
1925 * replacing the whole non-privilged batch buffer page to this
1926 * shadowed one in PPGTT at the same gma address. (this replacing
1927 * action is not implemented yet now, but may be necessary in
1929 * for prileged batch buffer, we just change start gma address to
1930 * that of shadowed page.
1933 start_offset
= gma
& ~I915_GTT_PAGE_MASK
;
1935 bb
->obj
= i915_gem_object_create_shmem(s
->engine
->i915
,
1936 round_up(bb_size
+ start_offset
,
1938 if (IS_ERR(bb
->obj
)) {
1939 ret
= PTR_ERR(bb
->obj
);
1943 bb
->va
= i915_gem_object_pin_map(bb
->obj
, I915_MAP_WB
);
1944 if (IS_ERR(bb
->va
)) {
1945 ret
= PTR_ERR(bb
->va
);
1949 ret
= copy_gma_to_hva(s
->vgpu
, mm
,
1951 bb
->va
+ start_offset
);
1953 gvt_vgpu_err("fail to copy guest ring buffer\n");
1958 ret
= audit_bb_end(s
, bb
->va
+ start_offset
+ bb_end_cmd_offset
);
1962 i915_gem_object_unlock(bb
->obj
);
1963 INIT_LIST_HEAD(&bb
->list
);
1964 list_add(&bb
->list
, &s
->workload
->shadow_bb
);
1966 bb
->bb_start_cmd_va
= s
->ip_va
;
1968 if ((s
->buf_type
== BATCH_BUFFER_INSTRUCTION
) && (!s
->is_ctx_wa
))
1969 bb
->bb_offset
= s
->ip_va
- s
->rb_va
;
1974 * ip_va saves the virtual address of the shadow batch buffer, while
1975 * ip_gma saves the graphics address of the original batch buffer.
1976 * As the shadow batch buffer is just a copy from the originial one,
1977 * it should be right to use shadow batch buffer'va and original batch
1978 * buffer's gma in pair. After all, we don't want to pin the shadow
1979 * buffer here (too early).
1981 s
->ip_va
= bb
->va
+ start_offset
;
1985 i915_gem_object_unpin_map(bb
->obj
);
1987 i915_gem_object_put(bb
->obj
);
1993 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state
*s
)
1997 struct intel_vgpu
*vgpu
= s
->vgpu
;
1999 if (s
->buf_type
== BATCH_BUFFER_2ND_LEVEL
) {
2000 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
2004 second_level
= BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s
, 0)) == 1;
2005 if (second_level
&& (s
->buf_type
!= BATCH_BUFFER_INSTRUCTION
)) {
2006 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
2010 s
->saved_buf_addr_type
= s
->buf_addr_type
;
2011 addr_type_update_snb(s
);
2012 if (s
->buf_type
== RING_BUFFER_INSTRUCTION
) {
2013 s
->ret_ip_gma_ring
= s
->ip_gma
+ cmd_length(s
) * sizeof(u32
);
2014 s
->buf_type
= BATCH_BUFFER_INSTRUCTION
;
2015 } else if (second_level
) {
2016 s
->buf_type
= BATCH_BUFFER_2ND_LEVEL
;
2017 s
->ret_ip_gma_bb
= s
->ip_gma
+ cmd_length(s
) * sizeof(u32
);
2018 s
->ret_bb_va
= s
->ip_va
+ cmd_length(s
) * sizeof(u32
);
2021 if (batch_buffer_needs_scan(s
)) {
2022 ret
= perform_bb_shadow(s
);
2024 gvt_vgpu_err("invalid shadow batch buffer\n");
2026 /* emulate a batch buffer end to do return right */
2027 ret
= cmd_handler_mi_batch_buffer_end(s
);
static int mi_noop_index;

static const struct cmd_info cmd_info[] = {
	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},

	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, NULL},

	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, cmd_handler_mi_user_interrupt},

	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},

	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},

	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},

	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},

	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},

	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},

	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
		D_ALL, 0, 1, NULL},

	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		cmd_handler_mi_batch_buffer_end},

	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, NULL},

	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},

	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
		D_ALL, 0, 1, NULL},

	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},

	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},

	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},

	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
		R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},

	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},

	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
		D_ALL, 0, 8, NULL, CMD_LEN(0)},

	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
		NULL, CMD_LEN(0)},

	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
		8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},

	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},

	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
		0, 8, cmd_handler_mi_store_data_index},

	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
		D_ALL, 0, 8, cmd_handler_lri},

	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
		cmd_handler_mi_update_gtt},

	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_srm, CMD_LEN(2)},

	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
		cmd_handler_mi_flush_dw},

	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
		10, cmd_handler_mi_clflush},

	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
		cmd_handler_mi_report_perf_count, CMD_LEN(2)},

	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_lrm, CMD_LEN(2)},

	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
		cmd_handler_lrr, CMD_LEN(1)},

	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
		8, NULL, CMD_LEN(2)},

	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},

	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(2), 8, NULL},

	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
		ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},

	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
		8, cmd_handler_mi_op_2f},

	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
		cmd_handler_mi_batch_buffer_start},

	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},

	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
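	/* 2D (XY_*) blitter commands, mostly restricted to the BCS ring */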
2164 {"XY_SETUP_BLT", OP_XY_SETUP_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2165 ADDR_FIX_2(4, 7), 8, NULL
},
2167 {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2170 {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT
,
2171 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_1(4), 8, NULL
},
2173 {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
, 0, 8, NULL
},
2175 {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2178 {"XY_TEXT_BLT", OP_XY_TEXT_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2179 ADDR_FIX_1(3), 8, NULL
},
2181 {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT
, F_LEN_VAR
, R_BCS
,
2184 {"XY_COLOR_BLT", OP_XY_COLOR_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2185 ADDR_FIX_1(4), 8, NULL
},
2187 {"XY_PAT_BLT", OP_XY_PAT_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2188 ADDR_FIX_2(4, 5), 8, NULL
},
2190 {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2191 ADDR_FIX_1(4), 8, NULL
},
2193 {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2194 ADDR_FIX_2(4, 7), 8, NULL
},
2196 {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT
, F_LEN_VAR
, R_BCS
,
2197 D_ALL
, ADDR_FIX_2(4, 5), 8, NULL
},
2199 {"XY_FULL_BLT", OP_XY_FULL_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
, 0, 8, NULL
},
2201 {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT
, F_LEN_VAR
, R_BCS
,
2202 D_ALL
, ADDR_FIX_3(4, 5, 8), 8, NULL
},
2204 {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT
, F_LEN_VAR
,
2205 R_BCS
, D_ALL
, ADDR_FIX_2(4, 7), 8, NULL
},
2207 {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
2208 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT
,
2209 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_2(4, 5), 8, NULL
},
2211 {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT
, F_LEN_VAR
, R_BCS
,
2212 D_ALL
, ADDR_FIX_1(4), 8, NULL
},
2214 {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT
,
2215 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_1(4), 8, NULL
},
2217 {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE
, F_LEN_VAR
, R_BCS
,
2218 D_ALL
, ADDR_FIX_1(4), 8, NULL
},
2220 {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT
, F_LEN_VAR
, R_BCS
,
2221 D_ALL
, ADDR_FIX_2(4, 7), 8, NULL
},
2223 {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT
,
2224 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_2(4, 7), 8, NULL
},
2226 {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
2227 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT
,
2228 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_2(4, 5), 8, NULL
},
2230 {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
2231 ADDR_FIX_2(4, 5), 8, NULL
},
2233 {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE
,
2234 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_1(4), 8, NULL
},
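	/* 3D/render state commands, restricted to the RCS ring */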
2236 {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
2237 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP
,
2238 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2240 {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
2241 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC
,
2242 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2244 {"3DSTATE_BLEND_STATE_POINTERS",
2245 OP_3DSTATE_BLEND_STATE_POINTERS
,
2246 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2248 {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
2249 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS
,
2250 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2252 {"3DSTATE_BINDING_TABLE_POINTERS_VS",
2253 OP_3DSTATE_BINDING_TABLE_POINTERS_VS
,
2254 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2256 {"3DSTATE_BINDING_TABLE_POINTERS_HS",
2257 OP_3DSTATE_BINDING_TABLE_POINTERS_HS
,
2258 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2260 {"3DSTATE_BINDING_TABLE_POINTERS_DS",
2261 OP_3DSTATE_BINDING_TABLE_POINTERS_DS
,
2262 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2264 {"3DSTATE_BINDING_TABLE_POINTERS_GS",
2265 OP_3DSTATE_BINDING_TABLE_POINTERS_GS
,
2266 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2268 {"3DSTATE_BINDING_TABLE_POINTERS_PS",
2269 OP_3DSTATE_BINDING_TABLE_POINTERS_PS
,
2270 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2272 {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
2273 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS
,
2274 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2276 {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2277 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS
,
2278 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2280 {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2281 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS
,
2282 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2284 {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2285 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS
,
2286 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2288 {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2289 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS
,
2290 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2292 {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS
, F_LEN_VAR
, R_RCS
, D_ALL
,
2295 {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS
, F_LEN_VAR
, R_RCS
, D_ALL
,
2298 {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS
, F_LEN_VAR
, R_RCS
, D_ALL
,
2301 {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS
, F_LEN_VAR
, R_RCS
, D_ALL
,
2304 {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS
,
2305 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2307 {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS
,
2308 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2310 {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS
,
2311 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2313 {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS
,
2314 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2316 {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS
,
2317 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2319 {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS
,
2320 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 11, NULL
},
2322 {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS
,
2323 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 11, NULL
},
2325 {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS
,
2326 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2328 {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS
,
2329 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2331 {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS
,
2332 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2334 {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS
,
2335 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2337 {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS
,
2338 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2340 {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS
,
2341 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2343 {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS
,
2344 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2346 {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS
,
2347 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2349 {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS
,
2350 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2352 {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS
,
2353 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2355 {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS
,
2356 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2358 {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS
,
2359 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2361 {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS
,
2362 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2364 {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING
, F_LEN_VAR
, R_RCS
,
2365 D_BDW_PLUS
, 0, 8, NULL
},
2367 {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2370 {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY
, F_LEN_VAR
, R_RCS
,
2371 D_BDW_PLUS
, 0, 8, NULL
},
2373 {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY
, F_LEN_VAR
, R_RCS
,
2374 D_BDW_PLUS
, 0, 8, NULL
},
2376 {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0,
2379 {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL
, F_LEN_VAR
,
2380 R_RCS
, D_BDW_PLUS
, 0, 8, NULL
},
2382 {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0,
2385 {"3DSTATE_RASTER", OP_3DSTATE_RASTER
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2388 {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2391 {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2394 {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS
, F_LEN_VAR
, R_RCS
,
2395 D_BDW_PLUS
, 0, 8, NULL
},
2397 {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS
, F_LEN_VAR
,
2398 R_RCS
, D_ALL
, 0, 8, NULL
},
2400 {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER
, F_LEN_VAR
, R_RCS
,
2401 D_BDW_PLUS
, ADDR_FIX_1(2), 8, NULL
},
2403 {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS
, F_LEN_CONST
,
2404 R_RCS
, D_ALL
, 0, 1, NULL
},
2406 {"3DSTATE_VF", OP_3DSTATE_VF
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2408 {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS
, F_LEN_VAR
,
2409 R_RCS
, D_ALL
, 0, 8, NULL
},
2411 {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS
,
2412 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2414 {"3DSTATE_GS", OP_3DSTATE_GS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2416 {"3DSTATE_CLIP", OP_3DSTATE_CLIP
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2418 {"3DSTATE_WM", OP_3DSTATE_WM
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2420 {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS
, F_LEN_VAR
, R_RCS
,
2421 D_BDW_PLUS
, 0, 8, NULL
},
2423 {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS
, F_LEN_VAR
, R_RCS
,
2424 D_BDW_PLUS
, 0, 8, NULL
},
2426 {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK
, F_LEN_VAR
, R_RCS
,
2429 {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS
, F_LEN_VAR
, R_RCS
,
2430 D_BDW_PLUS
, 0, 8, NULL
},
2432 {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS
, F_LEN_VAR
, R_RCS
,
2433 D_BDW_PLUS
, 0, 8, NULL
},
2435 {"3DSTATE_HS", OP_3DSTATE_HS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2437 {"3DSTATE_TE", OP_3DSTATE_TE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2439 {"3DSTATE_DS", OP_3DSTATE_DS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2441 {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT
, F_LEN_VAR
, R_RCS
,
2444 {"3DSTATE_SBE", OP_3DSTATE_SBE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2446 {"3DSTATE_PS", OP_3DSTATE_PS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2448 {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE
, F_LEN_VAR
,
2449 R_RCS
, D_ALL
, 0, 8, NULL
},
2451 {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0
,
2452 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2454 {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY
, F_LEN_VAR
, R_RCS
, D_ALL
,
2457 {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER
, F_LEN_VAR
, R_RCS
,
2458 D_ALL
, ADDR_FIX_1(2), 8, NULL
},
2460 {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET
,
2461 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2463 {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN
,
2464 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2466 {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE
, F_LEN_VAR
, R_RCS
,
2469 {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS
, F_LEN_VAR
, R_RCS
,
2472 {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX
, F_LEN_VAR
, R_RCS
,
2475 {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1
,
2476 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2478 {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW
, F_LEN_VAR
, R_RCS
,
2479 D_BDW_PLUS
, 0, 8, NULL
},
2481 {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER
, F_LEN_VAR
, R_RCS
,
2482 D_ALL
, ADDR_FIX_1(2), 8, NULL
},
2484 {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER
, F_LEN_VAR
,
2485 R_RCS
, D_ALL
, ADDR_FIX_1(2), 8, NULL
},
2487 {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS
, F_LEN_VAR
,
2488 R_RCS
, D_ALL
, 0, 8, NULL
},
2490 {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS
,
2491 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2493 {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS
,
2494 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2496 {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS
,
2497 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2499 {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS
,
2500 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2502 {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS
,
2503 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2505 {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE
, F_LEN_VAR
,
2506 R_RCS
, D_ALL
, 0, 8, NULL
},
2508 {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST
, F_LEN_VAR
, R_RCS
,
2511 {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
,
2512 ADDR_FIX_2(2, 4), 8, NULL
},
2514 {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2515 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC
,
2516 F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, ADDR_FIX_1(1), 8, NULL
},
2518 {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC
,
2519 F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, ADDR_FIX_1(1), 8, NULL
},
2521 {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2522 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC
,
2523 F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, ADDR_FIX_1(1), 8, NULL
},
2525 {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN
, F_LEN_VAR
, R_RCS
,
2526 D_BDW_PLUS
, 0, 8, NULL
},
2528 {"PIPE_CONTROL", OP_PIPE_CONTROL
, F_LEN_VAR
, R_RCS
, D_ALL
,
2529 ADDR_FIX_1(2), 8, cmd_handler_pipe_control
},
2531 {"3DPRIMITIVE", OP_3DPRIMITIVE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2533 {"PIPELINE_SELECT", OP_PIPELINE_SELECT
, F_LEN_CONST
, R_RCS
, D_ALL
, 0,
2536 {"STATE_PREFETCH", OP_STATE_PREFETCH
, F_LEN_VAR
, R_RCS
, D_ALL
,
2537 ADDR_FIX_1(1), 8, NULL
},
2539 {"STATE_SIP", OP_STATE_SIP
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2541 {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
,
2542 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL
},
2544 {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4
, F_LEN_VAR
, R_RCS
, D_ALL
,
2545 ADDR_FIX_1(1), 8, NULL
},
2547 {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS
,
2548 F_LEN_VAR
, R_RCS
, D_ALL
, ADDR_FIX_2(1, 2), 3, NULL
},
2550 {"3DSTATE_VS", OP_3DSTATE_VS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2552 {"3DSTATE_SF", OP_3DSTATE_SF
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2554 {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
,
2557 {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING
, F_LEN_VAR
, R_RCS
,
2558 D_SKL_PLUS
, 0, 8, NULL
},
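	/* media pipeline commands */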
2560 {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD
,
2561 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 16, NULL
},
2563 {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE
, F_LEN_VAR
, R_RCS
, D_ALL
,
2566 {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH
, F_LEN_VAR
, R_RCS
, D_ALL
,
2569 {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE
, F_LEN_VAR
, R_RCS
, D_ALL
,
2572 {"MEDIA_OBJECT", OP_MEDIA_OBJECT
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 16, NULL
},
2574 {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD
, F_LEN_VAR
, R_RCS
, D_ALL
,
2577 {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT
, F_LEN_VAR
, R_RCS
, D_ALL
,
2580 {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER
, F_LEN_VAR
, R_RCS
, D_ALL
,
2583 {"GPGPU_WALKER", OP_GPGPU_WALKER
, F_LEN_VAR
, R_RCS
, D_ALL
,
2586 {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 16,
2589 {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45
,
2590 F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1, NULL
},
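	/* MFX/MFD/MFC video codec commands, restricted to the VCS ring */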
2592 {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT
, F_LEN_VAR
,
2593 R_VCS
, D_ALL
, 0, 12, NULL
},
2595 {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE
, F_LEN_VAR
,
2596 R_VCS
, D_ALL
, 0, 12, NULL
},
2598 {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE
, F_LEN_VAR
,
2599 R_VCS
, D_BDW_PLUS
, 0, 12, NULL
},
2601 {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE
,
2602 F_LEN_VAR
, R_VCS
, D_BDW_PLUS
, 0, 12, NULL
},
2604 {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE
,
2605 F_LEN_VAR
, R_VCS
, D_BDW_PLUS
, ADDR_FIX_3(1, 3, 5), 12, NULL
},
2607 {"OP_2_0_0_5", OP_2_0_0_5
, F_LEN_VAR
, R_VCS
, D_BDW_PLUS
, 0, 12, NULL
},
2609 {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER
, F_LEN_VAR
,
2610 R_VCS
, D_ALL
, 0, 12, NULL
},
2612 {"MFX_QM_STATE", OP_MFX_QM_STATE
, F_LEN_VAR
,
2613 R_VCS
, D_ALL
, 0, 12, NULL
},
2615 {"MFX_FQM_STATE", OP_MFX_FQM_STATE
, F_LEN_VAR
,
2616 R_VCS
, D_ALL
, 0, 12, NULL
},
2618 {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT
, F_LEN_VAR
,
2619 R_VCS
, D_ALL
, 0, 12, NULL
},
2621 {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT
, F_LEN_VAR
,
2622 R_VCS
, D_ALL
, 0, 12, NULL
},
2624 {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT
, F_LEN_VAR
,
2625 R_VCS
, D_ALL
, 0, 12, NULL
},
2627 {"MFX_WAIT", OP_MFX_WAIT
, F_LEN_VAR
,
2628 R_VCS
, D_ALL
, 0, 6, NULL
},
2630 {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE
, F_LEN_VAR
,
2631 R_VCS
, D_ALL
, 0, 12, NULL
},
2633 {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE
, F_LEN_VAR
,
2634 R_VCS
, D_ALL
, 0, 12, NULL
},
2636 {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE
, F_LEN_VAR
,
2637 R_VCS
, D_ALL
, 0, 12, NULL
},
2639 {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE
, F_LEN_VAR
,
2640 R_VCS
, D_ALL
, 0, 12, NULL
},
2642 {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE
, F_LEN_VAR
,
2643 R_VCS
, D_ALL
, 0, 12, NULL
},
2645 {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE
, F_LEN_VAR
,
2646 R_VCS
, D_ALL
, 0, 12, NULL
},
2648 {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE
, F_LEN_VAR
,
2649 R_VCS
, D_ALL
, 0, 12, NULL
},
2650 {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE
, F_LEN_VAR
,
2651 R_VCS
, D_ALL
, 0, 12, NULL
},
2653 {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT
, F_LEN_VAR
,
2654 R_VCS
, D_ALL
, 0, 12, NULL
},
2656 {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR
, F_LEN_VAR
,
2657 R_VCS
, D_ALL
, ADDR_FIX_1(2), 12, NULL
},
2659 {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT
, F_LEN_VAR
,
2660 R_VCS
, D_ALL
, 0, 12, NULL
},
2662 {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE
, F_LEN_VAR
,
2663 R_VCS
, D_ALL
, 0, 12, NULL
},
2665 {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE
, F_LEN_VAR
,
2666 R_VCS
, D_ALL
, 0, 12, NULL
},
2668 {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE
, F_LEN_VAR
,
2669 R_VCS
, D_ALL
, 0, 12, NULL
},
2671 {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE
, F_LEN_VAR
,
2672 R_VCS
, D_ALL
, 0, 12, NULL
},
2674 {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT
, F_LEN_VAR
,
2675 R_VCS
, D_ALL
, 0, 12, NULL
},
2677 {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE
, F_LEN_VAR
,
2678 R_VCS
, D_ALL
, 0, 12, NULL
},
2680 {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT
, F_LEN_VAR
,
2681 R_VCS
, D_ALL
, 0, 12, NULL
},
2683 {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE
, F_LEN_VAR
,
2684 R_VCS
, D_ALL
, 0, 12, NULL
},
2686 {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE
, F_LEN_VAR
,
2687 R_VCS
, D_ALL
, 0, 12, NULL
},
2689 {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT
, F_LEN_VAR
,
2690 R_VCS
, D_ALL
, 0, 12, NULL
},
2692 {"MFX_2_6_0_0", OP_MFX_2_6_0_0
, F_LEN_VAR
, R_VCS
, D_ALL
,
2695 {"MFX_2_6_0_9", OP_MFX_2_6_0_9
, F_LEN_VAR
, R_VCS
, D_ALL
, 0, 16, NULL
},
2697 {"MFX_2_6_0_8", OP_MFX_2_6_0_8
, F_LEN_VAR
, R_VCS
, D_ALL
, 0, 16, NULL
},
2699 {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE
, F_LEN_VAR
,
2700 R_VCS
, D_ALL
, 0, 12, NULL
},
2702 {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE
, F_LEN_VAR
,
2703 R_VCS
, D_ALL
, 0, 12, NULL
},
2705 {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT
, F_LEN_VAR
,
2706 R_VCS
, D_ALL
, 0, 12, NULL
},
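	/* VEBOX commands, restricted to the VECS ring */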
2708 {"VEBOX_STATE", OP_VEB_STATE
, F_LEN_VAR
, R_VECS
, D_ALL
, 0, 12, NULL
},
2710 {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE
, F_LEN_VAR
, R_VECS
, D_ALL
,
2713 {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE
, F_LEN_VAR
, R_VECS
, D_BDW_PLUS
,
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
{
	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
}
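/*
 * The command table is a hash keyed by opcode, which keeps the lookup
 * done for every scanned command cheap; entries sharing an opcode are
 * told apart by the engine mask at lookup time.
 */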
/* call the cmd handler, and advance ip */
static int cmd_parser_exec(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;
	const struct cmd_info *info;
	u32 cmd;
	int ret = 0;

	cmd = cmd_val(s, 0);

	/* fastpath for MI_NOOP */
	if (cmd == MI_NOOP)
		info = &cmd_info[mi_noop_index];
	else
		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);

	if (info == NULL) {
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
			     cmd, get_opcode(cmd, s->engine),
			     repr_addr_type(s->buf_addr_type),
			     s->engine->name, s->workload);
		return -EBADRQC;
	}

	s->info = info;

	trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
			  cmd_length(s), s->buf_type, s->buf_addr_type,
			  s->workload, info->name);

	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
		ret = gvt_check_valid_cmd_length(cmd_length(s),
						 info->valid_len);
		if (ret)
			return ret;
	}

	if (info->handler) {
		ret = info->handler(s);
		if (ret < 0) {
			gvt_vgpu_err("%s handler error\n", info->name);
			return ret;
		}
	}

	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
		ret = cmd_advance_default(s);
		if (ret) {
			gvt_vgpu_err("%s IP advance error\n", info->name);
			return ret;
		}
	}
	return 0;
}
static inline bool gma_out_of_range(unsigned long gma,
		unsigned long gma_head, unsigned int gma_tail)
{
	if (gma_tail >= gma_head)
		return (gma < gma_head) || (gma > gma_tail);
	else
		return (gma > gma_tail) && (gma < gma_head);
}
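/*
 * Example: with a 4KB ring where gma_head = base + 0xf00 and
 * gma_tail = base + 0x100, the valid region wraps around the top of
 * the ring, so gma_tail < gma_head selects the second branch above:
 * an address is out of range only when it falls in the hole between
 * tail and head, e.g. base + 0x800.
 */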
/* Keep a consistent return type: e.g. -EBADRQC for an unknown cmd,
 * -EFAULT for an invalid address, -EPERM for a non-privileged access.
 * These later serve as the input of the VM health status.
 */
static int command_scan(struct parser_exec_state *s,
		unsigned long rb_head, unsigned long rb_tail,
		unsigned long rb_start, unsigned long rb_len)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	gma_head = rb_start + rb_head;
	gma_tail = rb_start + rb_tail;
	gma_bottom = rb_start + rb_len;

	while (s->ip_gma != gma_tail) {
		if (s->buf_type == RING_BUFFER_INSTRUCTION ||
				s->buf_type == RING_BUFFER_CTX) {
			if (!(s->ip_gma >= rb_start) ||
				!(s->ip_gma < gma_bottom)) {
				gvt_vgpu_err("ip_gma %lx out of ring scope."
					"(base:0x%lx, bottom: 0x%lx)\n",
					s->ip_gma, rb_start,
					gma_bottom);
				parser_exec_state_dump(s);
				return -EFAULT;
			}
			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
				gvt_vgpu_err("ip_gma %lx out of range."
					"base 0x%lx head 0x%lx tail 0x%lx\n",
					s->ip_gma, rb_start,
					rb_head, rb_tail);
				parser_exec_state_dump(s);
				break;
			}
		}
		ret = cmd_parser_exec(s);
		if (ret) {
			gvt_vgpu_err("cmd parser error\n");
			parser_exec_state_dump(s);
			break;
		}
	}

	return ret;
}
static int scan_workload(struct intel_vgpu_workload *workload)
{
	unsigned long gma_head, gma_tail;
	struct parser_exec_state s;
	int ret = 0;

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
		return -EINVAL;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.engine = workload->engine;
	s.ring_start = workload->rb_start;
	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = workload->shadow_ring_buffer_va;
	s.workload = workload;
	s.is_ctx_wa = false;

	if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
		return 0;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		goto out;

	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));

out:
	return ret;
}
static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret = 0;
	unsigned long gma_head, gma_tail, ring_size, ring_tail;
	struct parser_exec_state s;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
				struct intel_vgpu_workload,
				wa_ctx);

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
			I915_GTT_PAGE_SIZE)))
		return -EINVAL;

	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
			PAGE_SIZE);
	gma_head = wa_ctx->indirect_ctx.guest_gma;
	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.engine = workload->engine;
	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
	s.ring_size = ring_size;
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
	s.workload = workload;
	s.is_ctx_wa = true;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		goto out;

	ret = command_scan(&s, 0, ring_tail,
		wa_ctx->indirect_ctx.guest_gma, ring_size);

out:
	return ret;
}
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
	void *shadow_ring_buffer_va;
	int ret;

	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);

	/* calculate workload ring buffer size */
	workload->rb_len = (workload->rb_tail + guest_rb_size -
			workload->rb_head) % guest_rb_size;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_top = workload->rb_start + guest_rb_size;

	if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
		void *p;

		/* realloc the new ring buffer if needed */
		p = krealloc(s->ring_scan_buffer[workload->engine->id],
			     workload->rb_len, GFP_KERNEL);
		if (!p) {
			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
			return -ENOMEM;
		}
		s->ring_scan_buffer[workload->engine->id] = p;
		s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
	}

	shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;

	/* head > tail --> copy head <-> top */
	if (gma_head > gma_tail) {
		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
				      gma_head, gma_top, shadow_ring_buffer_va);
		if (ret < 0) {
			gvt_vgpu_err("fail to copy guest ring buffer\n");
			return ret;
		}
		shadow_ring_buffer_va += ret;
		gma_head = workload->rb_start;
	}

	/* copy head or start <-> tail */
	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
			      shadow_ring_buffer_va);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		return ret;
	}
	return 0;
}
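/*
 * Example of the wrapped copy above: with rb_head = 0xf00,
 * rb_tail = 0x100 and a 4KB ring, the guest commands live in
 * [head, top) plus [start, tail). The first copy_gma_to_hva() moves
 * 0x100 bytes, gma_head is then rewound to rb_start, and the second
 * copy appends the remaining 0x100 bytes, so the shadow buffer holds
 * one linear stream.
 */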
int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
{
	int ret;
	struct intel_vgpu *vgpu = workload->vgpu;

	ret = shadow_workload_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
		return ret;
	}

	ret = scan_workload(workload);
	if (ret) {
		gvt_vgpu_err("scan workload error\n");
		return ret;
	}
	return 0;
}
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ctx_size = wa_ctx->indirect_ctx.size;
	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_gem_object *obj;
	int ret = 0;
	void *map;

	obj = i915_gem_object_create_shmem(workload->engine->i915,
					   roundup(ctx_size + CACHELINE_BYTES,
						   PAGE_SIZE));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* get the va of the shadow batch buffer */
	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map)) {
		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
		ret = PTR_ERR(map);
		goto put_obj;
	}

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (ret) {
		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
		goto unmap_src;
	}

	ret = copy_gma_to_hva(workload->vgpu,
				workload->vgpu->gtt.ggtt_mm,
				guest_gma, guest_gma + ctx_size,
				map);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest indirect ctx\n");
		goto unmap_src;
	}

	wa_ctx->indirect_ctx.obj = obj;
	wa_ctx->indirect_ctx.shadow_va = map;
	return 0;

unmap_src:
	i915_gem_object_unpin_map(obj);
put_obj:
	i915_gem_object_put(obj);
	return ret;
}
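/*
 * Append an MI_BATCH_BUFFER_START at the end of the shadowed indirect
 * context so that execution chains into the guest's per-context
 * buffer: 0x18800001 below is the header dword of MI_BATCH_BUFFER_START
 * (opcode 0x31 in bits 28:23, dword length 1), followed by the guest
 * graphics address.
 */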
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	u32 per_ctx_start[CACHELINE_DWORDS] = {};
	unsigned char *bb_start_sva;

	if (!wa_ctx->per_ctx.valid)
		return 0;

	per_ctx_start[0] = 0x18800001;
	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;

	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
				wa_ctx->indirect_ctx.size;

	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);

	return 0;
}
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	struct intel_vgpu *vgpu = workload->vgpu;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	ret = shadow_indirect_ctx(wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to shadow indirect ctx\n");
		return ret;
	}

	combine_wa_ctx(wa_ctx);

	ret = scan_wa_ctx(wa_ctx);
	if (ret) {
		gvt_vgpu_err("scan wa ctx error\n");
		return ret;
	}

	return 0;
}
/* Generate dummy contexts by sending empty requests to the HW and
 * letting the HW fill the engine contexts. These dummy contexts are
 * used only for initialization (updating the reg whitelist), so they
 * are referred to as init contexts here.
 */
void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
{
	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (gvt->is_reg_whitelist_updated)
		return;

	/* scan init ctx to update cmd accessible list */
	for_each_engine(engine, gvt->gt, id) {
		struct parser_exec_state s;
		void *vaddr;
		int ret;

		if (!engine->default_state)
			continue;

		vaddr = shmem_pin_map(engine->default_state);
		if (!vaddr) {
			gvt_err("failed to map %s->default state\n",
				engine->name);
			return;
		}

		s.buf_type = RING_BUFFER_CTX;
		s.buf_addr_type = GTT_BUFFER;
		s.vgpu = vgpu;
		s.engine = engine;
		s.ring_start = 0;
		s.ring_size = engine->context_size - start;
		s.ring_head = 0;
		s.ring_tail = s.ring_size;
		s.rb_va = vaddr + start;
		s.workload = NULL;
		s.is_ctx_wa = false;
		s.is_init_ctx = true;

		/* skipping the first RING_CTX_SIZE(0x50) dwords */
		ret = ip_gma_set(&s, RING_CTX_SIZE);
		if (ret == 0) {
			ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
			if (ret)
				gvt_err("Scan init ctx error\n");
		}

		shmem_unpin_map(engine->default_state, vaddr);
		if (ret)
			return;
	}

	gvt->is_reg_whitelist_updated = true;
}
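/*
 * Scan the engine-context part of a workload's shadow context image;
 * the ring-context registers at the head of the image are skipped
 * below, and inhibited contexts are not scanned at all since only
 * their ring context is loaded by the HW.
 */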
int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	unsigned long gma_head, gma_tail, gma_start, ctx_size;
	struct parser_exec_state s;
	int ring_id = workload->engine->id;
	struct intel_context *ce = vgpu->submission.shadow[ring_id];
	int ret;

	GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);

	ctx_size = workload->engine->context_size - PAGE_SIZE;

	/* Only the ring context is loaded to HW for an inhibit context,
	 * so there is no need to scan the engine context.
	 */
	if (is_inhibit_context(ce))
		return 0;

	gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
	gma_head = 0;
	gma_tail = ctx_size;

	s.buf_type = RING_BUFFER_CTX;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.engine = workload->engine;
	s.ring_start = gma_start;
	s.ring_size = ctx_size;
	s.ring_head = gma_start + gma_head;
	s.ring_tail = gma_start + gma_tail;
	s.rb_va = ce->lrc_reg_state;
	s.workload = workload;
	s.is_ctx_wa = false;
	s.is_init_ctx = false;

	/* don't scan the first RING_CTX_SIZE(0x50) dwords, as they are the
	 * ring context
	 */
	ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
	if (ret)
		goto out;

	ret = command_scan(&s, gma_head, gma_tail,
		gma_start, ctx_size);
out:
	if (ret)
		gvt_vgpu_err("scan shadow ctx error\n");
	return ret;
}
static int init_cmd_table(struct intel_gvt *gvt)
{
	unsigned int gen_type = intel_gvt_get_device_type(gvt);
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
		struct cmd_entry *e;

		if (!(cmd_info[i].devices & gen_type))
			continue;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->info = &cmd_info[i];
		if (cmd_info[i].opcode == OP_MI_NOOP)
			mi_noop_index = i;

		INIT_HLIST_NODE(&e->hlist);
		add_cmd_entry(gvt, e);
		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
			    e->info->name, e->info->opcode, e->info->flag,
			    e->info->devices, e->info->rings);
	}

	return 0;
}
static void clean_cmd_table(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct cmd_entry *e;
	int i;

	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
		kfree(e);

	hash_init(gvt->cmd_table);
}
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
{
	clean_cmd_table(gvt);
}

int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
{
	int ret;

	ret = init_cmd_table(gvt);
	if (ret) {
		intel_gvt_clean_cmd_parser(gvt);
		return ret;
	}
	return 0;
}