/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
#include <linux/slab.h>

#include "gt/intel_ring.h"
#include "i915_pvinfo.h"
#define INVALID_OP    (~0U)

#define OP_LEN_MI           9
#define OP_LEN_2D           10
#define OP_LEN_3D_MEDIA     16
#define OP_LEN_MFX_VC       16
#define OP_LEN_VEBOX        16

#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)
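/*
 * Worked example (illustrative, not from the original source): a
 * PIPE_CONTROL header such as 0x7a000004 decodes as
 * CMD_TYPE(0x7a000004) = (0x7a000004 >> 29) & 7 = 3 (3D/media),
 * while an MI_NOOP dword of 0x00000000 yields command type 0 (MI).
 */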
struct sub_op_bits {
	int hi;
	int low;
};

struct decode_info {
	const char *name;
	int op_len;
	int nr_sub_op;
	const struct sub_op_bits *sub_op;
};
#define MAX_CMD_BUDGET			0x7fffffff
#define MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
#define MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
#define MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)

#define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
#define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
#define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP                          0x0
#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT                0x2
#define OP_MI_WAIT_FOR_EVENT                0x3
#define OP_MI_FLUSH                         0x4
#define OP_MI_ARB_CHECK                     0x5
#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD                   0x7
#define OP_MI_ARB_ON_OFF                    0x8
#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END              0xA
#define OP_MI_SUSPEND_FLUSH                 0xB
#define OP_MI_PREDICATE                     0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
#define OP_MI_SET_APPID                     0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP                  0x14
#define OP_MI_SEMAPHORE_MBOX                0x16
#define OP_MI_SET_CONTEXT                   0x18
#define OP_MI_MATH                          0x1A
#define OP_MI_URB_CLEAR                     0x19
#define OP_MI_SEMAPHORE_SIGNAL              0x1B /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT                0x1C /* BDW+ */

#define OP_MI_STORE_DATA_IMM                0x20
#define OP_MI_STORE_DATA_INDEX              0x21
#define OP_MI_LOAD_REGISTER_IMM             0x22
#define OP_MI_UPDATE_GTT                    0x23
#define OP_MI_STORE_REGISTER_MEM            0x24
#define OP_MI_FLUSH_DW                      0x26
#define OP_MI_CLFLUSH                       0x27
#define OP_MI_REPORT_PERF_COUNT             0x28
#define OP_MI_LOAD_REGISTER_MEM             0x29 /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG             0x2A /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM             0x2B /* HSW+ */
#define OP_MI_LOAD_URB_MEM                  0x2C /* HSW+ */
#define OP_MI_STORE_URM_MEM                 0x2D /* HSW+ */
#define OP_MI_2E                            0x2E /* BDW+ */
#define OP_MI_2F                            0x2F /* BDW+ */
#define OP_MI_BATCH_BUFFER_START            0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36

#define BATCH_BUFFER_ADDR_MASK		((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK	((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)	((x) >> 22 & 1U)
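/*
 * Worked example (illustrative): BATCH_BUFFER_ADDR_MASK evaluates to
 * (1UL << 32) - (1U << 2) = 0xfffffffc, i.e. bits 31:2 of the low
 * address dword, and BATCH_BUFFER_ADDR_HIGH_MASK to 0xffff, i.e.
 * bits 15:0 of the high dword; together they carry a 48-bit GMA.
 */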
/* 2D command: Opcode (28:22) */
#define OP_2D(x)    ((2<<7) | x)

#define OP_XY_SETUP_BLT                             OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
#define OP_XY_TEXT_BLT                              OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
#define OP_XY_COLOR_BLT                             OP_2D(0x50)
#define OP_XY_PAT_BLT                               OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
#define OP_XY_FULL_BLT                              OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
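/*
 * Worked example (illustrative): OP_PIPE_CONTROL below is
 * OP_3D_MEDIA(0x3, 0x2, 0x00) = (3 << 13) | (3 << 11) | (2 << 8)
 * = 0x7a00, which matches bits 31:16 of a PIPE_CONTROL header
 * (0x7a000000) once get_opcode() shifts the dword right by
 * 32 - OP_LEN_3D_MEDIA = 16 bits.
 */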
#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)
#define OP_SWTESS_BASE_ADDRESS			OP_3D_MEDIA(0x0, 0x1, 0x03)

#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)

#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING 		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS  			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY   		OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY   		OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND   			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL   		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA   			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER   			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ   			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP   			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
/* VCCP Command Parser */

/*
 * Below MFX and VBE cmd definition is from vaapi intel driver project (BSD License)
 * git://anongit.freedesktop.org/vaapi/intel-driver
 */
#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
	((3 << 13) | ((pipeline) << 11) | ((op) << 8) |	\
	 ((sub_opa) << 5) | (sub_opb))
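/*
 * Worked example (illustrative, assuming the (3 << 13) encoding above):
 * OP_MFD_AVC_BSD_OBJECT = OP_MFX(2, 1, 1, 8)
 * = (3 << 13) | (2 << 11) | (1 << 8) | (1 << 5) | 8 = 0x7128,
 * i.e. the top 16 bits of an MFD_AVC_BSD_OBJECT header on the VCS ring.
 */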
#define OP_MFX_PIPE_MODE_SELECT                 OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE                    OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE              OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE          OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE          OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5                              OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER                    OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE                         OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE                        OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT                OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT                    OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT                        OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT                             OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE                    OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE                     OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE             OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE                  OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE                OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE           OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE                  OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE			OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR                    OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT                   OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT                   OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE              OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE             OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE              OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE               OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT                   OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE                  OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE                   OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT                 OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE           OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT                 OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0                          OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8                          OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9                          OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE                   OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE            OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT                  OP_MFX(2, 7, 1, 8)
#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	((3 << 13) | ((pipeline) << 11) | ((op) << 8) |	\
	 ((sub_opa) << 5) | (sub_opb))

#define OP_VEB_SURFACE_STATE                    OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE                            OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE                  OP_VEB(2, 4, 0, 3)
struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS 7
/* which DWords need address fix */
#define ADDR_FIX_1(x1)			(1 << (x1))
#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)	(ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))

#define DWORD_FIELD(dword, end, start) \
	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
#define OP_LENGTH_BIAS 2
#define CMD_LEN(value)	(value + OP_LENGTH_BIAS)
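/*
 * Example (illustrative): a command whose DWord Length field reads 2
 * occupies CMD_LEN(2) = 2 + OP_LENGTH_BIAS = 4 dwords in total, since
 * the hardware length field excludes the first two dwords.
 */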
static int gvt_check_valid_cmd_length(int len, int valid_len)
{
	if (valid_len != len) {
		gvt_err("len is not valid:  len=%u  valid_len=%u\n",
			len, valid_len);
		return -EFAULT;
	}

	return 0;
}
struct cmd_info {
	const char *name;
	u32 opcode;

#define F_LEN_MASK	3U
#define F_LEN_CONST	1U
#define F_LEN_VAR	0U
/* value is const although LEN maybe variable */
#define F_LEN_VAR_FIXED	(1<<1)

/*
 * command has its own ip advance logic
 * e.g. MI_BATCH_START, MI_BATCH_END
 */
#define F_IP_ADVANCE_CUSTOM (1<<2)
	u32 flag;

#define	R_RCS	BIT(RCS0)
#define	R_VCS1	BIT(VCS0)
#define	R_VCS2	BIT(VCS1)
#define	R_VCS	(R_VCS1 | R_VCS2)
#define R_BCS	BIT(BCS0)
#define R_VECS	BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	u16 rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	u16 devices;

	/* which DWords are address that need fix up.
	 * bit 0 means a 32-bit non address operand in command
	 * bit 1 means address operand, which could be 32-bit
	 * or 64-bit depending on different architectures.(
	 * defined by "gmadr_bytes_in_cmd" in intel_gvt.
	 * No matter the address length, each address only takes
	 * one bit in the bitmap.
	 */
	u16 addr_bitmap;

	/* flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : length bias bits
	 * Note: length is in DWord
	 */
	u8 len;

	parser_cmd_handler handler;

	/* valid length in DWord */
	u32 valid_len;
};

struct cmd_entry {
	struct hlist_node hlist;
	const struct cmd_info *info;
};
enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};
struct parser_exec_state {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of the instr_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;

	/* next instruction when return from  batch buffer to ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when return from 2nd batch buffer to batch buffer */
	unsigned long ret_ip_gma_bb;

	/* batch buffer address type (GTT or PPGTT)
	 * used when ret from 2nd level batch buffer
	 */
	int saved_buf_addr_type;
	bool is_ctx_wa;

	const struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};
#define gmadr_dw_number(s) \
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;
/* ring ALL, type = 0 */
static const struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static const struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static const struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static const struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static const struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static const struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static const struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static const struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS0] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS1] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};
static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
	const struct decode_info *d_info;

	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}
static inline const struct cmd_info *
find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
	       const struct intel_engine_cs *engine)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if (opcode == e->info->opcode &&
		    e->info->rings & engine->mask)
			return e->info;
	}
	return NULL;
}
static inline const struct cmd_info *
get_cmd_info(struct intel_gvt *gvt, u32 cmd,
	     const struct intel_engine_cs *engine)
{
	u32 opcode;

	opcode = get_opcode(cmd, engine);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
	const struct decode_info *d_info;
	int i;

	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return;

	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
			cmd >> (32 - d_info->op_len), d_info->name);

	for (i = 0; i < d_info->nr_sub_op; i++)
		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
					d_info->sub_op[i].low));

	pr_err("\n");
}
static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
	return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}
static void parser_exec_state_dump(struct parser_exec_state *s)
{
	int cnt = 0;
	int i;

	gvt_dbg_cmd("  vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
		    " ring_head(%08lx) ring_tail(%08lx)\n",
		    s->vgpu->id, s->engine->name,
		    s->ring_start, s->ring_start + s->ring_size,
		    s->ring_head, s->ring_tail);

	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
			s->buf_type == RING_BUFFER_INSTRUCTION ?
			"RING_BUFFER" : "BATCH_BUFFER",
			s->buf_addr_type == GTT_BUFFER ?
			"GTT" : "PPGTT", s->ip_gma);

	if (s->ip_va == NULL) {
		gvt_dbg_cmd(" ip_va(NULL)");
		return;
	}

	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	print_opcode(cmd_val(s, 0), s->engine);

	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

	while (cnt < 1024) {
		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
		for (i = 0; i < 8; i++)
			gvt_dbg_cmd("%08x ", cmd_val(s, i));
		gvt_dbg_cmd("\n");

		s->ip_va += 8 * sizeof(u32);
		cnt += 8 * sizeof(u32);
	}
}
static inline void update_ip_va(struct parser_exec_state *s)
{
	unsigned long len = 0;

	if (WARN_ON(s->ring_head == s->ring_tail))
		return;

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		unsigned long ring_top = s->ring_start + s->ring_size;

		if (s->ring_head > s->ring_tail) {
			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
				len = (s->ip_gma - s->ring_head);
			else if (s->ip_gma >= s->ring_start &&
					s->ip_gma <= s->ring_tail)
				len = (ring_top - s->ring_head) +
					(s->ip_gma - s->ring_start);
		} else
			len = (s->ip_gma - s->ring_head);

		s->ip_va = s->rb_va + len;
	} else {/* shadow batch buffer */
		s->ip_va = s->ret_bb_va;
	}
}
static inline int ip_gma_set(struct parser_exec_state *s,
		unsigned long ip_gma)
{
	WARN_ON(!IS_ALIGNED(ip_gma, 4));

	s->ip_gma = ip_gma;
	update_ip_va(s);
	return 0;
}
static inline int ip_gma_advance(struct parser_exec_state *s,
		unsigned int dw_len)
{
	s->ip_gma += (dw_len << 2);

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		if (s->ip_gma >= s->ring_start + s->ring_size)
			s->ip_gma -= s->ring_size;
		update_ip_va(s);
	} else {
		s->ip_va += (dw_len << 2);
	}

	return 0;
}
static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;

	return (cmd & ((1U << info->len) - 1)) + 2;
}
static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}
/* do not remove this, some platform may need clflush here */
#define patch_value(s, addr, val) do { \
	*(addr) = val; \
} while (0)

static bool is_shadowed_mmio(unsigned int offset)
{
	bool ret = false;

	if ((offset == 0x2168) || /*BB current head register UDW */
	    (offset == 0x2140) || /*BB current header register */
	    (offset == 0x211c) || /*second BB header register UDW */
	    (offset == 0x2114)) { /*second BB header register UDW */
		ret = true;
	}
	return ret;
}
static inline bool is_force_nonpriv_mmio(unsigned int offset)
{
	return (offset >= 0x24d0 && offset < 0x2500);
}
static int force_nonpriv_reg_handler(struct parser_exec_state *s,
		unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	unsigned int data;
	u32 ring_base;
	u32 nopid;

	if (!strcmp(cmd, "lri"))
		data = cmd_val(s, index + 1);
	else {
		gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
			offset, cmd);
		return -EINVAL;
	}

	ring_base = s->engine->mmio_base;
	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));

	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
	    data != nopid) {
		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
			offset, data);
		patch_value(s, cmd_ptr(s, index), nopid);
	}
	return 0;
}
static inline bool is_mocs_mmio(unsigned int offset)
{
	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
		((offset >= 0xb020) && (offset <= 0xb0a0));
}
static int mocs_cmd_reg_handler(struct parser_exec_state *s,
				unsigned int offset, unsigned int index)
{
	if (!is_mocs_mmio(offset))
		return -EINVAL;
	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
	return 0;
}
static int is_cmd_update_pdps(unsigned int offset,
			      struct parser_exec_state *s)
{
	u32 base = s->workload->engine->mmio_base;

	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
}
static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
		unsigned int offset, unsigned int index)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
	struct intel_vgpu_mm *mm;
	u64 pdps[GEN8_3LVL_PDPES];

	if (shadow_mm->ppgtt_mm.root_entry_type ==
	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		pdps[0] = (u64)cmd_val(s, 2) << 32;
		pdps[0] |= cmd_val(s, 4);

		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
		if (!mm) {
			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
			return -EINVAL;
		}
		intel_vgpu_mm_get(mm);
		list_add_tail(&mm->ppgtt_mm.link,
			      &s->workload->lri_shadow_mm);
		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
	} else {
		/* Currently all guests use PML4 table and now can't
		 * have a guest with 3-level table but uses LRI for
		 * PPGTT update. So this is simply un-testable. */
		gvt_vgpu_err("invalid shared shadow vm type\n");
		return -EINVAL;
	}
	return 0;
}
static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	u32 ctx_sr_ctl;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EFAULT;
	}

	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return -EBADRQC;
	}

	if (is_shadowed_mmio(offset)) {
		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
		return 0;
	}

	if (is_mocs_mmio(offset) &&
	    mocs_cmd_reg_handler(s, offset, index))
		return -EINVAL;

	if (is_force_nonpriv_mmio(offset) &&
	    force_nonpriv_reg_handler(s, offset, index, cmd))
		return -EPERM;

	if (offset == i915_mmio_reg_offset(DERRMR) ||
		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	if (is_cmd_update_pdps(offset, s) &&
	    cmd_pdp_mmio_update_handler(s, offset, index))
		return -EINVAL;

	/*
	 * In order to let workload with inhibit context to generate
	 * correct image data into memory, vregs values will be loaded to
	 * hw via LRIs in the workload with inhibit context. But as
	 * indirect context is loaded prior to LRIs in workload, we don't
	 * want reg values specified in indirect context overwritten by
	 * LRIs in workloads. So, when scanning an indirect context, we
	 * update reg values in it into vregs, so LRIs in workload with
	 * inhibit context will restore with correct values
	 */
	if (IS_GEN(s->engine->i915, 9) &&
	    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
	    !strncmp(cmd, "lri", 3)) {
		intel_gvt_hypervisor_read_gpa(s->vgpu,
			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
		/* check inhibit context */
		if (ctx_sr_ctl & 1) {
			u32 data = cmd_val(s, index + 1);

			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
				intel_vgpu_mask_mmio_write(vgpu,
							offset, &data, 4);
			else
				vgpu_vreg(vgpu, offset) = data;
		}
	}

	return 0;
}
#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))
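/*
 * Example (illustrative): for an 8-byte GMA, the handlers below combine
 * the two dwords as
 *	gma = cmd_gma(s, i + 1);             /- bits 31:2 of the low dword -/
 *	gma |= cmd_gma_hi(s, i + 2) << 32;   /- bits 15:0 of the high dword -/
 * before passing the result to cmd_address_audit().
 */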
static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
			if (s->engine->id == BCS0 &&
			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
				ret |= 0;
			else
				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
		if (ret)
			break;
	}
	return ret;
}
static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->engine->i915))
			ret |= ((cmd_reg_inhibit(s, i) ||
				 (cmd_reg_inhibit(s, i + 1)))) ?
				-EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
		if (ret)
			break;
	}
	return ret;
}
static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(s->engine->i915))
			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}
static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}
struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS0] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS1] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};
static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;
	u32 hws_pga, val;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt*/
			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
						index_mode);
				if (ret)
					return ret;
				if (index_mode) {
					hws_pga = s->vgpu->hws_pga[s->engine->id];
					gma = hws_pga + gma;
					patch_value(s, cmd_ptr(s, 2), gma);
					val = cmd_val(s, 1) & (~(1 << 21));
					patch_value(s, cmd_ptr(s, 1), val);
				}
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
			s->workload->pending_events);

	return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
		s->workload->pending_events);
	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}
static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}
static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}
struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
		return -EBADRQC;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(info->pipe);
		info->stride_reg = DSPSTRIDE(info->pipe);
		info->surf_reg = DSPSURF(info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		drm_WARN_ON(&dev_priv->drm, 1);
		return -EBADRQC;
	}
	return 0;
}
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	info->plane = PRIMARY_PLANE;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
		info->pipe = PIPE_A;
		info->event = SPRITE_A_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
		info->pipe = PIPE_B;
		info->event = SPRITE_B_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
		info->pipe = PIPE_C;
		info->event = SPRITE_C_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	default:
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EBADRQC;
	}

	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(info->pipe);
	info->stride_reg = DSPSTRIDE(info->pipe);
	info->surf_reg = DSPSURF(info->pipe);

	return 0;
}
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (INTEL_GEN(s->engine->i915) >= 9) {
		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
				GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
				GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}
static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->engine->i915;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (INTEL_GEN(dev_priv) >= 9) {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	if (info->plane == PLANE_PRIMARY)
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;

	if (info->async_flip)
		intel_vgpu_trigger_virtual_event(vgpu, info->event);
	else
		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);

	return 0;
}
static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	if (IS_BROADWELL(s->engine->i915))
		return gen8_decode_mi_display_flip(s, info);
	if (INTEL_GEN(s->engine->i915) >= 9)
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_check_mi_display_flip(s, info);
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}
static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);
	u32 valid_len = CMD_LEN(1);

	/* Flip Type == Stereo 3D Flip */
	if (DWORD_FIELD(2, 1, 0) == 2)
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret)
		return ret;

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}
static bool is_wait_for_flip_pending(u32 cmd)
{
	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}
static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	struct intel_vgpu *vgpu = s->vgpu;
	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
		return INTEL_GVT_INVALID_ADDR;
	}

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}
static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_vgpu_err("command address audit fail name %s\n",
			     s->info->name);
		return -EFAULT;
	}

	if (index_mode)	{
		if (guest_gma >= I915_GTT_PAGE_SIZE) {
			ret = -EFAULT;
			goto err;
		}
	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
		ret = -EFAULT;
		goto err;
	}

	return 0;

err:
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
			s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
			vgpu->id,
			vgpu_aperture_gmadr_base(vgpu),
			vgpu_aperture_gmadr_end(vgpu),
			vgpu_hidden_gmadr_base(vgpu),
			vgpu_hidden_gmadr_end(vgpu));
	return ret;
}
static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	u32 valid_len = CMD_LEN(2);
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	/* check if QWORD */
	if (DWORD_FIELD(0, 21, 21))
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}
static inline int unexpected_cmd(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;

	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);

	return -EBADRQC;
}
static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}
static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
			sizeof(u32);
	unsigned long gma, gma_high;
	u32 valid_len = CMD_LEN(1);
	int ret = 0;

	if (!(cmd_val(s, 0) & (1 << 22)))
		return ret;

	/* check inline data */
	if (cmd_val(s, 0) & BIT(18))
		valid_len = CMD_LEN(9);
	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 1) & GENMASK(31, 2);
	if (gmadr_bytes == 8) {
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma;
	}
	ret = cmd_address_audit(s, gma, op_size, false);
	return ret;
}
static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_clflush(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_conditional_batch_buffer_end(
		struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}
static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	int ret = 0;
	u32 hws_pga, val;
	u32 valid_len = CMD_LEN(2);

	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret) {
		/* Check again for Qword */
		ret = gvt_check_valid_cmd_length(cmd_length(s),
				++valid_len);
		if (ret)
			return ret;
	}

	/* Check post-sync and ppgtt bit */
	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
		gma = cmd_val(s, 1) & GENMASK(31, 3);
		if (gmadr_bytes == 8)
			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
		/* Store Data Index */
		if (cmd_val(s, 0) & (1 << 21))
			index_mode = true;
		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
		if (ret)
			return ret;
		if (index_mode) {
			hws_pga = s->vgpu->hws_pga[s->engine->id];
			gma = hws_pga + gma;
			patch_value(s, cmd_ptr(s, 1), gma);
			val = cmd_val(s, 0) & (~(1 << 21));
			patch_value(s, cmd_ptr(s, 0), val);
		}
	}
	/* Check notify bit */
	if ((cmd_val(s, 0) & (1 << 8)))
		set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
			s->workload->pending_events);
	return ret;
}
static void addr_type_update_snb(struct parser_exec_state *s)
{
	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
		s->buf_addr_type = PPGTT_BUFFER;
	}
}
static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
		unsigned long gma, unsigned long end_gma, void *va)
{
	unsigned long copy_len, offset;
	unsigned long len = 0;
	unsigned long gpa;

	while (gma != end_gma) {
		gpa = intel_vgpu_gma_to_gpa(mm, gma);
		if (gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_vgpu_err("invalid gma address: %lx\n", gma);
			return -EFAULT;
		}

		offset = gma & (I915_GTT_PAGE_SIZE - 1);

		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
			I915_GTT_PAGE_SIZE - offset : end_gma - gma;

		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);

		len += copy_len;
		gma += copy_len;
	}
	return len;
}
/*
 * Check whether a batch buffer needs to be scanned. Currently
 * the only criteria is based on privilege.
 */
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
	/* Decide privilege based on address space */
	if (cmd_val(s, 0) & BIT(8) &&
	    !(s->vgpu->scan_nonprivbb & s->engine->mask))
		return 0;

	return 1;
}

static const char *repr_addr_type(unsigned int type)
{
	return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
}
static int find_bb_size(struct parser_exec_state *s,
			unsigned long *bb_size,
			unsigned long *bb_end_cmd_offset)
{
	unsigned long gma = 0;
	const struct cmd_info *info;
	u32 cmd_len = 0;
	bool bb_end = false;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 cmd;
	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;

	*bb_size = 0;
	*bb_end_cmd_offset = 0;

	/* get the start gm address of the batch buffer */
	gma = get_gma_bb_from_cmd(s, 1);
	if (gma == INTEL_GVT_INVALID_ADDR)
		return -EFAULT;

	cmd = cmd_val(s, 0);
	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
	if (info == NULL) {
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
			     cmd, get_opcode(cmd, s->engine),
			     repr_addr_type(s->buf_addr_type),
			     s->engine->name, s->workload);
		return -EBADRQC;
	}
	do {
		if (copy_gma_to_hva(s->vgpu, mm,
				    gma, gma + 4, &cmd) < 0)
			return -EFAULT;
		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
		if (info == NULL) {
			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
				     cmd, get_opcode(cmd, s->engine),
				     repr_addr_type(s->buf_addr_type),
				     s->engine->name, s->workload);
			return -EBADRQC;
		}

		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
			bb_end = true;
		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
				/* chained batch buffer */
				bb_end = true;
		}

		if (bb_end)
			*bb_end_cmd_offset = *bb_size;

		cmd_len = get_cmd_length(info, cmd) << 2;
		*bb_size += cmd_len;
		gma += cmd_len;
	} while (!bb_end);

	return 0;
}
static int audit_bb_end(struct parser_exec_state *s, void *va)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 cmd = *(u32 *)va;
	const struct cmd_info *info;

	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
	if (info == NULL) {
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
			     cmd, get_opcode(cmd, s->engine),
			     repr_addr_type(s->buf_addr_type),
			     s->engine->name, s->workload);
		return -EBADRQC;
	}

	if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
	    ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
	     (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
		return 0;

	return -EBADRQC;
}
static int perform_bb_shadow(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_vgpu_shadow_bb *bb;
	unsigned long gma = 0;
	unsigned long bb_size;
	unsigned long bb_end_cmd_offset;
	int ret = 0;
	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
	unsigned long start_offset = 0;

	/* get the start gm address of the batch buffer */
	gma = get_gma_bb_from_cmd(s, 1);
	if (gma == INTEL_GVT_INVALID_ADDR)
		return -EFAULT;

	ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
	if (ret)
		return ret;

	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
	if (!bb)
		return -ENOMEM;

	bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;

	/* the start_offset stores the batch buffer's start gma's
	 * offset relative to page boundary. so for non-privileged batch
	 * buffer, the shadowed gem object holds exactly the same page
	 * layout as original gem object. This is for the convience of
	 * replacing the whole non-privilged batch buffer page to this
	 * shadowed one in PPGTT at the same gma address. (this replacing
	 * action is not implemented yet now, but may be necessary in
	 * future).
	 * for prileged batch buffer, we just change start gma address to
	 * that of shadowed page.
	 */
	if (bb->ppgtt)
		start_offset = gma & ~I915_GTT_PAGE_MASK;

	bb->obj = i915_gem_object_create_shmem(s->engine->i915,
					       round_up(bb_size + start_offset,
							PAGE_SIZE));
	if (IS_ERR(bb->obj)) {
		ret = PTR_ERR(bb->obj);
		goto err_free_bb;
	}

	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
	if (IS_ERR(bb->va)) {
		ret = PTR_ERR(bb->va);
		goto err_free_obj;
	}

	ret = copy_gma_to_hva(s->vgpu, mm,
			      gma, gma + bb_size,
			      bb->va + start_offset);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
	if (ret)
		goto err_unmap;

	i915_gem_object_unlock(bb->obj);
	INIT_LIST_HEAD(&bb->list);
	list_add(&bb->list, &s->workload->shadow_bb);

	bb->bb_start_cmd_va = s->ip_va;

	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
		bb->bb_offset = s->ip_va - s->rb_va;
	else
		bb->bb_offset = 0;

	/*
	 * ip_va saves the virtual address of the shadow batch buffer, while
	 * ip_gma saves the graphics address of the original batch buffer.
	 * As the shadow batch buffer is just a copy from the originial one,
	 * it should be right to use shadow batch buffer'va and original batch
	 * buffer's gma in pair. After all, we don't want to pin the shadow
	 * buffer here (too early).
	 */
	s->ip_va = bb->va + start_offset;
	s->ip_gma = gma;
	return 0;

err_unmap:
	i915_gem_object_unpin_map(bb->obj);
err_free_obj:
	i915_gem_object_put(bb->obj);
err_free_bb:
	kfree(bb);
	return ret;
}
static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
{
	bool second_level;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
		return -EFAULT;
	}

	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
		return -EFAULT;
	}

	s->saved_buf_addr_type = s->buf_addr_type;
	addr_type_update_snb(s);
	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
	} else if (second_level) {
		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
	}

	if (batch_buffer_needs_scan(s)) {
		ret = perform_bb_shadow(s);
		if (ret < 0)
			gvt_vgpu_err("invalid shadow batch buffer\n");
	} else {
		/* emulate a batch buffer end to do return right */
		ret = cmd_handler_mi_batch_buffer_end(s);
		if (ret < 0)
			return ret;
	}
	return ret;
}
1996 static int mi_noop_index
;
static const struct cmd_info cmd_info[] = {
	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, NULL},
	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, cmd_handler_mi_user_interrupt},
	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},
	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
		D_ALL, 0, 1, NULL},
	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		cmd_handler_mi_batch_buffer_end},
	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
		0, 1, NULL},
	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},
	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
		D_ALL, 0, 1, NULL},
	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
		NULL},
	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
		NULL},
	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8, NULL,
		CMD_LEN(1)},
	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
		D_ALL, 0, 8, NULL, CMD_LEN(0)},
	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
		NULL, CMD_LEN(0)},
	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
		8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL,
		D_BDW_PLUS, ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL,
		D_ALL, 0, 8, cmd_handler_mi_store_data_index},
	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
		D_ALL, 0, 8, cmd_handler_lri},
	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0,
		10, cmd_handler_mi_update_gtt},
	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_srm, CMD_LEN(2)},
	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
		cmd_handler_mi_flush_dw},
	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
		10, cmd_handler_mi_clflush},
	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
		cmd_handler_mi_report_perf_count, CMD_LEN(2)},
	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_lrm, CMD_LEN(2)},
	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
		cmd_handler_lrr, CMD_LEN(1)},
	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
		8, NULL, CMD_LEN(2)},
	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(2), 8, NULL},
	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
		ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
		8, cmd_handler_mi_op_2f},
	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
		cmd_handler_mi_batch_buffer_start},
	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
		cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
		0, 8, NULL},
	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
		0, 8, NULL},
	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_1(3), 8, NULL},
	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
		D_ALL, 0, 8, NULL},
	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_1(4), 8, NULL},
	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_1(4), 8, NULL},
	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
		ADDR_FIX_2(4, 5), 8, NULL},
	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BLEND_STATE_POINTERS",
		OP_3DSTATE_BLEND_STATE_POINTERS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
		R_RCS, D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		0, 8, NULL},
	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
		R_RCS, D_ALL, 0, 1, NULL},
	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
		0, 8, NULL},
	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
		D_ALL, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
		D_ALL, 0, 8, NULL},
	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
		D_ALL, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
		R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
		D_ALL, 0, 9, NULL},
	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
		ADDR_FIX_2(2, 4), 8, NULL},
	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
		1, NULL},
	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(1), 8, NULL},
	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
		ADDR_FIX_1(1), 8, NULL},
	{"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
		F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS,
		D_BDW_PLUS, 0, 8, NULL},
	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR,
		R_RCS, D_SKL_PLUS, 0, 8, NULL},
	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS,
		D_ALL, 0, 16, NULL},
	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS,
		D_ALL, 0, 16, NULL},
	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
		0, 16, NULL},
	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
		NULL},
	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
		R_VCS, D_BDW_PLUS, 0, 12, NULL},
	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 6, NULL},
	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
		0, 16, NULL},
	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
		R_VCS, D_ALL, 0, 12, NULL},
	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
		0, 12, NULL},
	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
		0, 12, NULL},
};
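/*
 * How to read one row of the table above (an illustrative gloss; the field
 * order follows struct cmd_info defined earlier in this file):
 *
 *	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
 *		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
 *		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
 *
 * The command is variable length (the low 8 bits of dword 0 encode the
 * length), but F_LEN_VAR_FIXED additionally pins the only valid length to
 * CMD_LEN(2) - assuming the usual MI length bias of 2, a 4-dword command.
 * It is legal on all rings of BDW+ devices, dword 2 carries a guest
 * graphics address that the scanner must audit (ADDR_FIX_1(2)), and the
 * command is routed to cmd_handler_mi_semaphore_wait.
 */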
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
{
	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
}
/* call the cmd handler, and advance ip */
static int cmd_parser_exec(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;
	const struct cmd_info *info;
	u32 cmd;
	int ret = 0;

	cmd = cmd_val(s, 0);

	/* fastpath for MI_NOOP */
	if (cmd == MI_NOOP)
		info = &cmd_info[mi_noop_index];
	else
		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);

	if (info == NULL) {
		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
			     cmd, get_opcode(cmd, s->engine),
			     repr_addr_type(s->buf_addr_type),
			     s->engine->name, s->workload);
		return -EBADRQC;
	}

	s->info = info;

	trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
			  cmd_length(s), s->buf_type, s->buf_addr_type,
			  s->workload, info->name);

	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
		ret = gvt_check_valid_cmd_length(cmd_length(s),
						 info->valid_len);
		if (ret)
			return ret;
	}

	if (info->handler) {
		ret = info->handler(s);
		if (ret < 0) {
			gvt_vgpu_err("%s handler error\n", info->name);
			return ret;
		}
	}

	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
		ret = cmd_advance_default(s);
		if (ret) {
			gvt_vgpu_err("%s IP advance error\n", info->name);
			return ret;
		}
	}
	return 0;
}
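/*
 * Worked example of the fixed-length check above (illustrative, assuming
 * CMD_LEN() adds the MI length bias of 2 as defined earlier in this file):
 * MI_LOAD_REGISTER_MEM is tabled as F_LEN_VAR_FIXED with CMD_LEN(2). A
 * well-formed BDW+ LRM is 4 dwords, i.e. its header length field is 2, so
 * cmd_length(s) == 2 + 2 == 4 equals info->valid_len and
 * gvt_check_valid_cmd_length() passes; any other encoding is rejected
 * before the handler ever runs.
 */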
static inline bool gma_out_of_range(unsigned long gma,
		unsigned long gma_head, unsigned int gma_tail)
{
	if (gma_tail >= gma_head)
		return (gma < gma_head) || (gma > gma_tail);
	else
		return (gma > gma_tail) && (gma < gma_head);
}
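/*
 * Illustrative numbers for the wrap-around logic above: with a 4KB ring at
 * rb_start = 0x10000, a head offset of 0xe00 and a tail offset of 0x200
 * give gma_head = 0x10e00 and gma_tail = 0x10200. Since gma_tail < gma_head
 * the valid region wraps through the top of the ring, and only
 * (gma_tail, gma_head) is out of range: gma = 0x10800 is rejected, while
 * gma = 0x10f00 (above head) and gma = 0x10100 (below tail) are accepted.
 */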
/*
 * Keep the return codes consistent: -EBADRQC for an unknown command,
 * -EFAULT for an invalid address and -EPERM for a non-privileged access;
 * they later serve as input to the VM health status.
 */
static int command_scan(struct parser_exec_state *s,
		unsigned long rb_head, unsigned long rb_tail,
		unsigned long rb_start, unsigned long rb_len)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	int ret = 0;
	struct intel_vgpu *vgpu = s->vgpu;

	gma_head = rb_start + rb_head;
	gma_tail = rb_start + rb_tail;
	gma_bottom = rb_start + rb_len;

	while (s->ip_gma != gma_tail) {
		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
			if (!(s->ip_gma >= rb_start) ||
			    !(s->ip_gma < gma_bottom)) {
				gvt_vgpu_err("ip_gma %lx out of ring scope (base: 0x%lx, bottom: 0x%lx)\n",
					     s->ip_gma, rb_start, gma_bottom);
				parser_exec_state_dump(s);
				return -EFAULT;
			}
			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
				gvt_vgpu_err("ip_gma %lx out of range, base 0x%lx head 0x%lx tail 0x%lx\n",
					     s->ip_gma, rb_start,
					     rb_head, rb_tail);
				parser_exec_state_dump(s);
				break;
			}
		}
		ret = cmd_parser_exec(s);
		if (ret) {
			gvt_vgpu_err("cmd parser error\n");
			parser_exec_state_dump(s);
			break;
		}
	}

	return ret;
}
static int scan_workload(struct intel_vgpu_workload *workload)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	struct parser_exec_state s;
	int ret = 0;

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
		return -EINVAL;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.engine = workload->engine;
	s.ring_start = workload->rb_start;
	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = workload->shadow_ring_buffer_va;
	s.workload = workload;
	s.is_ctx_wa = false;

	if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
		return 0;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		goto out;

	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
			   workload->rb_start,
			   _RING_CTL_BUF_SIZE(workload->rb_ctl));

out:
	return ret;
}
static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret;
	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
	struct parser_exec_state s;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
				struct intel_vgpu_workload,
				wa_ctx);

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
				I915_GTT_PAGE_SIZE)))
		return -EINVAL;

	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
			     PAGE_SIZE);
	gma_head = wa_ctx->indirect_ctx.guest_gma;
	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.engine = workload->engine;
	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
	s.ring_size = ring_size;
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
	s.workload = workload;
	s.is_ctx_wa = true;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		goto out;

	ret = command_scan(&s, 0, ring_tail,
			   wa_ctx->indirect_ctx.guest_gma, ring_size);

out:
	return ret;
}
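/*
 * Note on ring_tail above: the scan deliberately runs 3 dwords past
 * indirect_ctx.size because combine_wa_ctx() below appends a 3-dword
 * MI_BATCH_BUFFER_START at exactly that offset (when the per-ctx buffer is
 * valid), and that chained jump must be audited like every other command.
 */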
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
	void *shadow_ring_buffer_va;
	int ret;

	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);

	/* calculate workload ring buffer size */
	workload->rb_len = (workload->rb_tail + guest_rb_size -
			workload->rb_head) % guest_rb_size;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_top = workload->rb_start + guest_rb_size;

	if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
		void *p;

		/* realloc the new ring buffer if needed */
		p = krealloc(s->ring_scan_buffer[workload->engine->id],
			     workload->rb_len, GFP_KERNEL);
		if (!p) {
			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
			return -ENOMEM;
		}
		s->ring_scan_buffer[workload->engine->id] = p;
		s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
	}

	shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;

	/* head > tail --> copy head <-> top */
	if (gma_head > gma_tail) {
		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
				      gma_head, gma_top, shadow_ring_buffer_va);
		if (ret < 0) {
			gvt_vgpu_err("fail to copy guest ring buffer\n");
			return ret;
		}
		shadow_ring_buffer_va += ret;
		gma_head = workload->rb_start;
	}

	/* copy head or start <-> tail */
	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
			      shadow_ring_buffer_va);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest ring buffer\n");
		return ret;
	}
	return 0;
}
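/*
 * Worked example for the two-chunk copy above (illustrative numbers): with
 * a 4KB guest ring (guest_rb_size = 0x1000), rb_head = 0xe00 and
 * rb_tail = 0x200, rb_len = (0x200 + 0x1000 - 0xe00) % 0x1000 = 0x400.
 * Because gma_head > gma_tail the buffer wraps: the first copy_gma_to_hva()
 * pulls the 0x200 bytes in [head, top), the shadow pointer advances by the
 * amount copied, gma_head resets to rb_start, and the second call pulls the
 * remaining 0x200 bytes in [start, tail).
 */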
int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
{
	int ret;
	struct intel_vgpu *vgpu = workload->vgpu;

	ret = shadow_workload_ring_buffer(workload);
	if (ret) {
		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
		return ret;
	}

	ret = scan_workload(workload);
	if (ret)
		gvt_vgpu_err("scan workload error\n");

	return ret;
}
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ctx_size = wa_ctx->indirect_ctx.size;
	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	struct intel_vgpu *vgpu = workload->vgpu;
	struct drm_i915_gem_object *obj;
	int ret = 0;
	void *map;

	obj = i915_gem_object_create_shmem(workload->engine->i915,
					   roundup(ctx_size + CACHELINE_BYTES,
						   PAGE_SIZE));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* get the va of the shadow batch buffer */
	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map)) {
		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
		ret = PTR_ERR(map);
		goto put_obj;
	}

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (ret) {
		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
		goto unmap_src;
	}

	ret = copy_gma_to_hva(workload->vgpu,
			      workload->vgpu->gtt.ggtt_mm,
			      guest_gma, guest_gma + ctx_size,
			      map);
	if (ret < 0) {
		gvt_vgpu_err("fail to copy guest indirect ctx\n");
		goto unmap_src;
	}

	wa_ctx->indirect_ctx.obj = obj;
	wa_ctx->indirect_ctx.shadow_va = map;
	return 0;

unmap_src:
	i915_gem_object_unpin_map(obj);
put_obj:
	i915_gem_object_put(obj);
	return ret;
}
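/*
 * Sizing note (illustrative): the shmem object above is allocated as
 * roundup(ctx_size + CACHELINE_BYTES, PAGE_SIZE) so that one spare
 * cacheline always follows the copied indirect context; combine_wa_ctx()
 * below writes the chained batch-buffer-start into exactly that slack.
 */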
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	u32 per_ctx_start[CACHELINE_DWORDS] = {0};
	unsigned char *bb_start_sva;

	if (!wa_ctx->per_ctx.valid)
		return 0;

	per_ctx_start[0] = 0x18800001;
	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;

	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
		wa_ctx->indirect_ctx.size;

	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);

	return 0;
}
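/*
 * Decode of the magic dword above (illustrative, assuming the gen8 MI
 * encoding): 0x18800001 == (0x31 << 23) | 1, i.e. an MI_BATCH_BUFFER_START
 * header (opcode 0x31 in bits 28:23) with a length field of 1, making it a
 * 3-dword command. Writing it, followed by the per-ctx guest address, at
 * the end of the shadowed indirect context makes the hardware chain from
 * the indirect context straight into the per-ctx workaround buffer.
 */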
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret;
	struct intel_vgpu_workload *workload = container_of(wa_ctx,
					struct intel_vgpu_workload,
					wa_ctx);
	struct intel_vgpu *vgpu = workload->vgpu;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	ret = shadow_indirect_ctx(wa_ctx);
	if (ret) {
		gvt_vgpu_err("fail to shadow indirect ctx\n");
		return ret;
	}

	combine_wa_ctx(wa_ctx);

	ret = scan_wa_ctx(wa_ctx);
	if (ret)
		gvt_vgpu_err("scan wa ctx error\n");

	return ret;
}
static int init_cmd_table(struct intel_gvt *gvt)
{
	unsigned int gen_type = intel_gvt_get_device_type(gvt);
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
		struct cmd_entry *e;

		if (!(cmd_info[i].devices & gen_type))
			continue;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->info = &cmd_info[i];
		if (cmd_info[i].opcode == OP_MI_NOOP)
			mi_noop_index = i;

		INIT_HLIST_NODE(&e->hlist);
		add_cmd_entry(gvt, e);
		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
			    e->info->name, e->info->opcode, e->info->flag,
			    e->info->devices, e->info->rings);
	}

	return 0;
}
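/*
 * A minimal sketch of how the table built above is consumed (illustrative
 * pseudo-code only; the real lookup helpers are defined earlier in this
 * file): entries are hashed by opcode via add_cmd_entry(), so a lookup
 * walks one bucket and filters by ring, roughly:
 *
 *	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode)
 *		if (opcode == e->info->opcode && (e->info->rings & ring_mask))
 *			return e->info;
 *
 * The mi_noop_index cached in the loop above lets cmd_parser_exec() bypass
 * even this cheap hash walk for the most frequent command, MI_NOOP.
 */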
static void clean_cmd_table(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct cmd_entry *e;
	int i;

	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
		kfree(e);

	hash_init(gvt->cmd_table);
}
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
{
	clean_cmd_table(gvt);
}
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
{
	int ret;

	ret = init_cmd_table(gvt);
	if (ret)
		intel_gvt_clean_cmd_parser(gvt);
	return ret;
}