1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 *    Ke Yu
25 *    Kevin Tian <kevin.tian@intel.com>
26 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
27 *
28 * Contributors:
29 *    Min He <min.he@intel.com>
30 *    Ping Gao <ping.a.gao@intel.com>
31 *    Tina Zhang <tina.zhang@intel.com>
32 *    Yulei Zhang <yulei.zhang@intel.com>
33 *    Zhi Wang <zhi.a.wang@intel.com>
34 *
35 */
36
37#include <linux/slab.h>
38
39#include "i915_drv.h"
40#include "gt/intel_ring.h"
41#include "gvt.h"
42#include "i915_pvinfo.h"
43#include "trace.h"
44
45#define INVALID_OP    (~0U)
46
47#define OP_LEN_MI           9
48#define OP_LEN_2D           10
49#define OP_LEN_3D_MEDIA     16
50#define OP_LEN_MFX_VC       16
51#define OP_LEN_VEBOX	    16
52
53#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)
54
55struct sub_op_bits {
56	int hi;
57	int low;
58};
59struct decode_info {
60	const char *name;
61	int op_len;
62	int nr_sub_op;
63	const struct sub_op_bits *sub_op;
64};
65
66#define   MAX_CMD_BUDGET			0x7fffffff
67#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
68#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
69#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)
70
71#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
72#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
73#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
74
75/* Render Command Map */
76
77/* MI_* command Opcode (28:23) */
78#define OP_MI_NOOP                          0x0
79#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
80#define OP_MI_USER_INTERRUPT                0x2
81#define OP_MI_WAIT_FOR_EVENT                0x3
82#define OP_MI_FLUSH                         0x4
83#define OP_MI_ARB_CHECK                     0x5
84#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
85#define OP_MI_REPORT_HEAD                   0x7
86#define OP_MI_ARB_ON_OFF                    0x8
87#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
88#define OP_MI_BATCH_BUFFER_END              0xA
89#define OP_MI_SUSPEND_FLUSH                 0xB
90#define OP_MI_PREDICATE                     0xC  /* IVB+ */
91#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
92#define OP_MI_SET_APPID                     0xE  /* IVB+ */
93#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
94#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
95#define OP_MI_DISPLAY_FLIP                  0x14
96#define OP_MI_SEMAPHORE_MBOX                0x16
97#define OP_MI_SET_CONTEXT                   0x18
98#define OP_MI_MATH                          0x1A
99#define OP_MI_URB_CLEAR                     0x19
100#define OP_MI_SEMAPHORE_SIGNAL		    0x1B  /* BDW+ */
101#define OP_MI_SEMAPHORE_WAIT		    0x1C  /* BDW+ */
102
103#define OP_MI_STORE_DATA_IMM                0x20
104#define OP_MI_STORE_DATA_INDEX              0x21
105#define OP_MI_LOAD_REGISTER_IMM             0x22
106#define OP_MI_UPDATE_GTT                    0x23
107#define OP_MI_STORE_REGISTER_MEM            0x24
108#define OP_MI_FLUSH_DW                      0x26
109#define OP_MI_CLFLUSH                       0x27
110#define OP_MI_REPORT_PERF_COUNT             0x28
111#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
112#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
113#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
114#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
115#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
116#define OP_MI_2E			    0x2E  /* BDW+ */
117#define OP_MI_2F			    0x2F  /* BDW+ */
118#define OP_MI_BATCH_BUFFER_START            0x31
119
120/* Bit definition for dword 0 */
121#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)
122
123#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36
124
125#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
126#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
127#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
128#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)
129
130/* 2D command: Opcode (28:22) */
131#define OP_2D(x)    ((2<<7) | x)
132
133#define OP_XY_SETUP_BLT                             OP_2D(0x1)
134#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
135#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
136#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
137#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
138#define OP_XY_TEXT_BLT                              OP_2D(0x26)
139#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
140#define OP_XY_COLOR_BLT                             OP_2D(0x50)
141#define OP_XY_PAT_BLT                               OP_2D(0x51)
142#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
143#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
144#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
145#define OP_XY_FULL_BLT                              OP_2D(0x55)
146#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
147#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
148#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
149#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
150#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
151#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
152#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
153#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
154#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
155#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
156#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
157
158/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
159#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
160	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
161
162#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)
163
164#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
165#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
166#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)
167#define OP_SWTESS_BASE_ADDRESS			OP_3D_MEDIA(0x0, 0x1, 0x03)
168
169#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)
170
171#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)
172
173#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
174#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
175#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
176#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
177#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
178#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)
179
180#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
181#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
182#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
183#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)
184
185#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
186#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
187#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
188#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
189#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
190#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
191#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
192#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
193#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
194#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
195#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
196#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
197#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
198#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
199#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
200#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
201#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
202#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
203#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
204#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
205#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
206#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
207#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
208#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
209#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
210#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
211#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
212#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
213#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
214#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
215#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
216#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
217#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
218#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
219#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
220#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
221#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
222#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
223#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
224#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
225#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
226#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
227#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
228#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
229#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
230#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
231#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
232#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
233#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
234#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
235#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
236#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
237#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
238#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
239#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
240#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
241#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
242#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
243#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
244#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
245#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
246#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
247#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
248#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
249#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
250#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
251
252#define OP_3DSTATE_VF_INSTANCING 		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
253#define OP_3DSTATE_VF_SGVS  			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
254#define OP_3DSTATE_VF_TOPOLOGY   		OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
255#define OP_3DSTATE_WM_CHROMAKEY   		OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
256#define OP_3DSTATE_PS_BLEND   			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
257#define OP_3DSTATE_WM_DEPTH_STENCIL   		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
258#define OP_3DSTATE_PS_EXTRA   			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
259#define OP_3DSTATE_RASTER   			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
260#define OP_3DSTATE_SBE_SWIZ   			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
261#define OP_3DSTATE_WM_HZ_OP   			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
262#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
263
264#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
265#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
266#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
267#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
268#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
269#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
270#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
271#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
272#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
273#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
274#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
275#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
276#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
277#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
278#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
279#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
280#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
281#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
282#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
283#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
284#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
285#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
286#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
287#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
288#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
289#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
290#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
291#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
292
293/* VCCP Command Parser */
294
295/*
296 * Below MFX and VBE cmd definition is from vaapi intel driver project (BSD License)
297 * git://anongit.freedesktop.org/vaapi/intel-driver
298 * src/i965_defines.h
299 *
300 */
301
302#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
303	(3 << 13 | \
304	 (pipeline) << 11 | \
305	 (op) << 8 | \
306	 (sub_opa) << 5 | \
307	 (sub_opb))
308
309#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
310#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
311#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
312#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
313#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
314#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
315#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
316#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
317#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
318#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
319#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */
320
321#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */
322
323#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
324#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
325#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
326#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
327#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
328#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
329#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
330#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
331#define OP_MFD_AVC_DPB_STATE			   OP_MFX(2, 1, 1, 6) /* IVB+ */
332#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
333#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
334#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */
335
336#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
337#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
338#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
339#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
340#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */
341
342#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
343#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
344#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
345#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
346#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */
347
348#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
349#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
350#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */
351
352#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
353#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
354#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)
355
356#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
357	(3 << 13 | \
358	 (pipeline) << 11 | \
359	 (op) << 8 | \
360	 (sub_opa) << 5 | \
361	 (sub_opb))
362
363#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
364#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
365#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)
366
367struct parser_exec_state;
368
369typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
370
371#define GVT_CMD_HASH_BITS   7
372
373/* which DWords need address fix */
374#define ADDR_FIX_1(x1)			(1 << (x1))
375#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
376#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
377#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
378#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
379
380#define DWORD_FIELD(dword, end, start) \
381	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
382
383#define OP_LENGTH_BIAS 2
384#define CMD_LEN(value)  (value + OP_LENGTH_BIAS)
385
386static int gvt_check_valid_cmd_length(int len, int valid_len)
387{
388	if (valid_len != len) {
389		gvt_err("len is not valid:  len=%u  valid_len=%u\n",
390			len, valid_len);
391		return -EFAULT;
392	}
393	return 0;
394}
395
396struct cmd_info {
397	const char *name;
398	u32 opcode;
399
400#define F_LEN_MASK	3U
401#define F_LEN_CONST  1U
402#define F_LEN_VAR    0U
403/* value is const although LEN maybe variable */
404#define F_LEN_VAR_FIXED    (1<<1)
405
406/*
407 * command has its own ip advance logic
408 * e.g. MI_BATCH_START, MI_BATCH_END
409 */
410#define F_IP_ADVANCE_CUSTOM (1<<2)
411	u32 flag;
412
413#define R_RCS	BIT(RCS0)
414#define R_VCS1  BIT(VCS0)
415#define R_VCS2  BIT(VCS1)
416#define R_VCS	(R_VCS1 | R_VCS2)
417#define R_BCS	BIT(BCS0)
418#define R_VECS	BIT(VECS0)
419#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
420	/* rings that support this cmd: BLT/RCS/VCS/VECS */
421	u16 rings;
422
423	/* devices that support this cmd: SNB/IVB/HSW/... */
424	u16 devices;
425
426	/* which DWords are address that need fix up.
427	 * bit 0 means a 32-bit non address operand in command
428	 * bit 1 means address operand, which could be 32-bit
429	 * or 64-bit depending on different architectures.(
430	 * defined by "gmadr_bytes_in_cmd" in intel_gvt.
431	 * No matter the address length, each address only takes
432	 * one bit in the bitmap.
433	 */
434	u16 addr_bitmap;
435
436	/* flag == F_LEN_CONST : command length
437	 * flag == F_LEN_VAR : length bias bits
438	 * Note: length is in DWord
439	 */
440	u32 len;
441
442	parser_cmd_handler handler;
443
444	/* valid length in DWord */
445	u32 valid_len;
446};
447
448struct cmd_entry {
449	struct hlist_node hlist;
450	const struct cmd_info *info;
451};
452
453enum {
454	RING_BUFFER_INSTRUCTION,
455	BATCH_BUFFER_INSTRUCTION,
456	BATCH_BUFFER_2ND_LEVEL,
457};
458
459enum {
460	GTT_BUFFER,
461	PPGTT_BUFFER
462};
463
464struct parser_exec_state {
465	struct intel_vgpu *vgpu;
466	const struct intel_engine_cs *engine;
467
468	int buf_type;
469
470	/* batch buffer address type */
471	int buf_addr_type;
472
473	/* graphics memory address of ring buffer start */
474	unsigned long ring_start;
475	unsigned long ring_size;
476	unsigned long ring_head;
477	unsigned long ring_tail;
478
479	/* instruction graphics memory address */
480	unsigned long ip_gma;
481
482	/* mapped va of the instr_gma */
483	void *ip_va;
484	void *rb_va;
485
486	void *ret_bb_va;
487	/* next instruction when return from  batch buffer to ring buffer */
488	unsigned long ret_ip_gma_ring;
489
490	/* next instruction when return from 2nd batch buffer to batch buffer */
491	unsigned long ret_ip_gma_bb;
492
493	/* batch buffer address type (GTT or PPGTT)
494	 * used when ret from 2nd level batch buffer
495	 */
496	int saved_buf_addr_type;
497	bool is_ctx_wa;
498
499	const struct cmd_info *info;
500
501	struct intel_vgpu_workload *workload;
502};
503
504#define gmadr_dw_number(s)	\
505	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
506
507static unsigned long bypass_scan_mask = 0;
508
509/* ring ALL, type = 0 */
510static const struct sub_op_bits sub_op_mi[] = {
511	{31, 29},
512	{28, 23},
513};
514
515static const struct decode_info decode_info_mi = {
516	"MI",
517	OP_LEN_MI,
518	ARRAY_SIZE(sub_op_mi),
519	sub_op_mi,
520};
521
522/* ring RCS, command type 2 */
523static const struct sub_op_bits sub_op_2d[] = {
524	{31, 29},
525	{28, 22},
526};
527
528static const struct decode_info decode_info_2d = {
529	"2D",
530	OP_LEN_2D,
531	ARRAY_SIZE(sub_op_2d),
532	sub_op_2d,
533};
534
535/* ring RCS, command type 3 */
536static const struct sub_op_bits sub_op_3d_media[] = {
537	{31, 29},
538	{28, 27},
539	{26, 24},
540	{23, 16},
541};
542
543static const struct decode_info decode_info_3d_media = {
544	"3D_Media",
545	OP_LEN_3D_MEDIA,
546	ARRAY_SIZE(sub_op_3d_media),
547	sub_op_3d_media,
548};
549
550/* ring VCS, command type 3 */
551static const struct sub_op_bits sub_op_mfx_vc[] = {
552	{31, 29},
553	{28, 27},
554	{26, 24},
555	{23, 21},
556	{20, 16},
557};
558
559static const struct decode_info decode_info_mfx_vc = {
560	"MFX_VC",
561	OP_LEN_MFX_VC,
562	ARRAY_SIZE(sub_op_mfx_vc),
563	sub_op_mfx_vc,
564};
565
566/* ring VECS, command type 3 */
567static const struct sub_op_bits sub_op_vebox[] = {
568	{31, 29},
569	{28, 27},
570	{26, 24},
571	{23, 21},
572	{20, 16},
573};
574
575static const struct decode_info decode_info_vebox = {
576	"VEBOX",
577	OP_LEN_VEBOX,
578	ARRAY_SIZE(sub_op_vebox),
579	sub_op_vebox,
580};
581
582static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
583	[RCS0] = {
584		&decode_info_mi,
585		NULL,
586		NULL,
587		&decode_info_3d_media,
588		NULL,
589		NULL,
590		NULL,
591		NULL,
592	},
593
594	[VCS0] = {
595		&decode_info_mi,
596		NULL,
597		NULL,
598		&decode_info_mfx_vc,
599		NULL,
600		NULL,
601		NULL,
602		NULL,
603	},
604
605	[BCS0] = {
606		&decode_info_mi,
607		NULL,
608		&decode_info_2d,
609		NULL,
610		NULL,
611		NULL,
612		NULL,
613		NULL,
614	},
615
616	[VECS0] = {
617		&decode_info_mi,
618		NULL,
619		NULL,
620		&decode_info_vebox,
621		NULL,
622		NULL,
623		NULL,
624		NULL,
625	},
626
627	[VCS1] = {
628		&decode_info_mi,
629		NULL,
630		NULL,
631		&decode_info_mfx_vc,
632		NULL,
633		NULL,
634		NULL,
635		NULL,
636	},
637};
638
639static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
640{
641	const struct decode_info *d_info;
642
643	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
644	if (d_info == NULL)
645		return INVALID_OP;
646
647	return cmd >> (32 - d_info->op_len);
648}
649
650static inline const struct cmd_info *
651find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
652	       const struct intel_engine_cs *engine)
653{
654	struct cmd_entry *e;
655
656	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
657		if (opcode == e->info->opcode &&
658		    e->info->rings & engine->mask)
659			return e->info;
660	}
661	return NULL;
662}
663
664static inline const struct cmd_info *
665get_cmd_info(struct intel_gvt *gvt, u32 cmd,
666	     const struct intel_engine_cs *engine)
667{
668	u32 opcode;
669
670	opcode = get_opcode(cmd, engine);
671	if (opcode == INVALID_OP)
672		return NULL;
673
674	return find_cmd_entry(gvt, opcode, engine);
675}
676
677static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
678{
679	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
680}
681
682static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
683{
684	const struct decode_info *d_info;
685	int i;
686
687	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
688	if (d_info == NULL)
689		return;
690
691	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
692			cmd >> (32 - d_info->op_len), d_info->name);
693
694	for (i = 0; i < d_info->nr_sub_op; i++)
695		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
696					d_info->sub_op[i].low));
697
698	pr_err("\n");
699}
700
701static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
702{
703	return s->ip_va + (index << 2);
704}
705
706static inline u32 cmd_val(struct parser_exec_state *s, int index)
707{
708	return *cmd_ptr(s, index);
709}
710
711static void parser_exec_state_dump(struct parser_exec_state *s)
712{
713	int cnt = 0;
714	int i;
715
716	gvt_dbg_cmd("  vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
717		    " ring_head(%08lx) ring_tail(%08lx)\n",
718		    s->vgpu->id, s->engine->name,
719		    s->ring_start, s->ring_start + s->ring_size,
720		    s->ring_head, s->ring_tail);
721
722	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
723			s->buf_type == RING_BUFFER_INSTRUCTION ?
724			"RING_BUFFER" : "BATCH_BUFFER",
725			s->buf_addr_type == GTT_BUFFER ?
726			"GTT" : "PPGTT", s->ip_gma);
727
728	if (s->ip_va == NULL) {
729		gvt_dbg_cmd(" ip_va(NULL)");
730		return;
731	}
732
733	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
734			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
735			cmd_val(s, 2), cmd_val(s, 3));
736
737	print_opcode(cmd_val(s, 0), s->engine);
738
739	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
740
741	while (cnt < 1024) {
742		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
743		for (i = 0; i < 8; i++)
744			gvt_dbg_cmd("%08x ", cmd_val(s, i));
745		gvt_dbg_cmd("\n");
746
747		s->ip_va += 8 * sizeof(u32);
748		cnt += 8;
749	}
750}
751
752static inline void update_ip_va(struct parser_exec_state *s)
753{
754	unsigned long len = 0;
755
756	if (WARN_ON(s->ring_head == s->ring_tail))
757		return;
758
759	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
760		unsigned long ring_top = s->ring_start + s->ring_size;
761
762		if (s->ring_head > s->ring_tail) {
763			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
764				len = (s->ip_gma - s->ring_head);
765			else if (s->ip_gma >= s->ring_start &&
766					s->ip_gma <= s->ring_tail)
767				len = (ring_top - s->ring_head) +
768					(s->ip_gma - s->ring_start);
769		} else
770			len = (s->ip_gma - s->ring_head);
771
772		s->ip_va = s->rb_va + len;
773	} else {/* shadow batch buffer */
774		s->ip_va = s->ret_bb_va;
775	}
776}
777
778static inline int ip_gma_set(struct parser_exec_state *s,
779		unsigned long ip_gma)
780{
781	WARN_ON(!IS_ALIGNED(ip_gma, 4));
782
783	s->ip_gma = ip_gma;
784	update_ip_va(s);
785	return 0;
786}
787
788static inline int ip_gma_advance(struct parser_exec_state *s,
789		unsigned int dw_len)
790{
791	s->ip_gma += (dw_len << 2);
792
793	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
794		if (s->ip_gma >= s->ring_start + s->ring_size)
795			s->ip_gma -= s->ring_size;
796		update_ip_va(s);
797	} else {
798		s->ip_va += (dw_len << 2);
799	}
800
801	return 0;
802}
803
804static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
805{
806	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
807		return info->len;
808	else
809		return (cmd & ((1U << info->len) - 1)) + 2;
810	return 0;
811}
812
813static inline int cmd_length(struct parser_exec_state *s)
814{
815	return get_cmd_length(s->info, cmd_val(s, 0));
816}
817
818/* do not remove this, some platform may need clflush here */
819#define patch_value(s, addr, val) do { \
820	*addr = val; \
821} while (0)
822
823static bool is_shadowed_mmio(unsigned int offset)
824{
825	bool ret = false;
826
827	if ((offset == 0x2168) || /*BB current head register UDW */
828	    (offset == 0x2140) || /*BB current header register */
829	    (offset == 0x211c) || /*second BB header register UDW */
830	    (offset == 0x2114)) { /*second BB header register UDW */
831		ret = true;
832	}
833	return ret;
834}
835
836static inline bool is_force_nonpriv_mmio(unsigned int offset)
837{
838	return (offset >= 0x24d0 && offset < 0x2500);
839}
840
841static int force_nonpriv_reg_handler(struct parser_exec_state *s,
842		unsigned int offset, unsigned int index, char *cmd)
843{
844	struct intel_gvt *gvt = s->vgpu->gvt;
845	unsigned int data;
846	u32 ring_base;
847	u32 nopid;
848
849	if (!strcmp(cmd, "lri"))
850		data = cmd_val(s, index + 1);
851	else {
852		gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
853			offset, cmd);
854		return -EINVAL;
855	}
856
857	ring_base = s->engine->mmio_base;
858	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
859
860	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
861			data != nopid) {
862		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
863			offset, data);
864		patch_value(s, cmd_ptr(s, index), nopid);
865		return 0;
866	}
867	return 0;
868}
869
870static inline bool is_mocs_mmio(unsigned int offset)
871{
872	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
873		((offset >= 0xb020) && (offset <= 0xb0a0));
874}
875
876static int mocs_cmd_reg_handler(struct parser_exec_state *s,
877				unsigned int offset, unsigned int index)
878{
879	if (!is_mocs_mmio(offset))
880		return -EINVAL;
881	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
882	return 0;
883}
884
885static int is_cmd_update_pdps(unsigned int offset,
886			      struct parser_exec_state *s)
887{
888	u32 base = s->workload->engine->mmio_base;
889	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
890}
891
892static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
893				       unsigned int offset, unsigned int index)
894{
895	struct intel_vgpu *vgpu = s->vgpu;
896	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
897	struct intel_vgpu_mm *mm;
898	u64 pdps[GEN8_3LVL_PDPES];
899
900	if (shadow_mm->ppgtt_mm.root_entry_type ==
901	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
902		pdps[0] = (u64)cmd_val(s, 2) << 32;
903		pdps[0] |= cmd_val(s, 4);
904
905		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
906		if (!mm) {
907			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
908			return -EINVAL;
909		}
910		intel_vgpu_mm_get(mm);
911		list_add_tail(&mm->ppgtt_mm.link,
912			      &s->workload->lri_shadow_mm);
913		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
914		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
915	} else {
916		/* Currently all guests use PML4 table and now can't
917		 * have a guest with 3-level table but uses LRI for
918		 * PPGTT update. So this is simply un-testable. */
919		GEM_BUG_ON(1);
920		gvt_vgpu_err("invalid shared shadow vm type\n");
921		return -EINVAL;
922	}
923	return 0;
924}
925
926static int cmd_reg_handler(struct parser_exec_state *s,
927	unsigned int offset, unsigned int index, char *cmd)
928{
929	struct intel_vgpu *vgpu = s->vgpu;
930	struct intel_gvt *gvt = vgpu->gvt;
931	u32 ctx_sr_ctl;
932
933	if (offset + 4 > gvt->device_info.mmio_size) {
934		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
935				cmd, offset);
936		return -EFAULT;
937	}
938
939	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
940		gvt_vgpu_err("%s access to non-render register (%x)\n",
941				cmd, offset);
942		return -EBADRQC;
943	}
944
945	if (is_shadowed_mmio(offset)) {
946		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
947		return 0;
948	}
949
950	if (is_mocs_mmio(offset) &&
951	    mocs_cmd_reg_handler(s, offset, index))
952		return -EINVAL;
953
954	if (is_force_nonpriv_mmio(offset) &&
955		force_nonpriv_reg_handler(s, offset, index, cmd))
956		return -EPERM;
957
958	if (offset == i915_mmio_reg_offset(DERRMR) ||
959		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
960		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
961		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
962	}
963
964	if (is_cmd_update_pdps(offset, s) &&
965	    cmd_pdp_mmio_update_handler(s, offset, index))
966		return -EINVAL;
967
968	/* TODO
969	 * In order to let workload with inhibit context to generate
970	 * correct image data into memory, vregs values will be loaded to
971	 * hw via LRIs in the workload with inhibit context. But as
972	 * indirect context is loaded prior to LRIs in workload, we don't
973	 * want reg values specified in indirect context overwritten by
974	 * LRIs in workloads. So, when scanning an indirect context, we
975	 * update reg values in it into vregs, so LRIs in workload with
976	 * inhibit context will restore with correct values
977	 */
978	if (IS_GEN(s->engine->i915, 9) &&
979	    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
980	    !strncmp(cmd, "lri", 3)) {
981		intel_gvt_hypervisor_read_gpa(s->vgpu,
982			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
983		/* check inhibit context */
984		if (ctx_sr_ctl & 1) {
985			u32 data = cmd_val(s, index + 1);
986
987			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
988				intel_vgpu_mask_mmio_write(vgpu,
989							offset, &data, 4);
990			else
991				vgpu_vreg(vgpu, offset) = data;
992		}
993	}
994
995	return 0;
996}
997
998#define cmd_reg(s, i) \
999	(cmd_val(s, i) & GENMASK(22, 2))
1000
1001#define cmd_reg_inhibit(s, i) \
1002	(cmd_val(s, i) & GENMASK(22, 18))
1003
1004#define cmd_gma(s, i) \
1005	(cmd_val(s, i) & GENMASK(31, 2))
1006
1007#define cmd_gma_hi(s, i) \
1008	(cmd_val(s, i) & GENMASK(15, 0))
1009
1010static int cmd_handler_lri(struct parser_exec_state *s)
1011{
1012	int i, ret = 0;
1013	int cmd_len = cmd_length(s);
1014
1015	for (i = 1; i < cmd_len; i += 2) {
1016		if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
1017			if (s->engine->id == BCS0 &&
1018			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
1019				ret |= 0;
1020			else
1021				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
1022		}
1023		if (ret)
1024			break;
1025		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
1026		if (ret)
1027			break;
1028	}
1029	return ret;
1030}
1031
1032static int cmd_handler_lrr(struct parser_exec_state *s)
1033{
1034	int i, ret = 0;
1035	int cmd_len = cmd_length(s);
1036
1037	for (i = 1; i < cmd_len; i += 2) {
1038		if (IS_BROADWELL(s->engine->i915))
1039			ret |= ((cmd_reg_inhibit(s, i) ||
1040				 (cmd_reg_inhibit(s, i + 1)))) ?
1041				-EBADRQC : 0;
1042		if (ret)
1043			break;
1044		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
1045		if (ret)
1046			break;
1047		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
1048		if (ret)
1049			break;
1050	}
1051	return ret;
1052}
1053
1054static inline int cmd_address_audit(struct parser_exec_state *s,
1055		unsigned long guest_gma, int op_size, bool index_mode);
1056
1057static int cmd_handler_lrm(struct parser_exec_state *s)
1058{
1059	struct intel_gvt *gvt = s->vgpu->gvt;
1060	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
1061	unsigned long gma;
1062	int i, ret = 0;
1063	int cmd_len = cmd_length(s);
1064
1065	for (i = 1; i < cmd_len;) {
1066		if (IS_BROADWELL(s->engine->i915))
1067			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
1068		if (ret)
1069			break;
1070		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
1071		if (ret)
1072			break;
1073		if (cmd_val(s, 0) & (1 << 22)) {
1074			gma = cmd_gma(s, i + 1);
1075			if (gmadr_bytes == 8)
1076				gma |= (cmd_gma_hi(s, i + 2)) << 32;
1077			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
1078			if (ret)
1079				break;
1080		}
1081		i += gmadr_dw_number(s) + 1;
1082	}
1083	return ret;
1084}
1085
1086static int cmd_handler_srm(struct parser_exec_state *s)
1087{
1088	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1089	unsigned long gma;
1090	int i, ret = 0;
1091	int cmd_len = cmd_length(s);
1092
1093	for (i = 1; i < cmd_len;) {
1094		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
1095		if (ret)
1096			break;
1097		if (cmd_val(s, 0) & (1 << 22)) {
1098			gma = cmd_gma(s, i + 1);
1099			if (gmadr_bytes == 8)
1100				gma |= (cmd_gma_hi(s, i + 2)) << 32;
1101			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
1102			if (ret)
1103				break;
1104		}
1105		i += gmadr_dw_number(s) + 1;
1106	}
1107	return ret;
1108}
1109
1110struct cmd_interrupt_event {
1111	int pipe_control_notify;
1112	int mi_flush_dw;
1113	int mi_user_interrupt;
1114};
1115
1116static struct cmd_interrupt_event cmd_interrupt_events[] = {
1117	[RCS0] = {
1118		.pipe_control_notify = RCS_PIPE_CONTROL,
1119		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
1120		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
1121	},
1122	[BCS0] = {
1123		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1124		.mi_flush_dw = BCS_MI_FLUSH_DW,
1125		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
1126	},
1127	[VCS0] = {
1128		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1129		.mi_flush_dw = VCS_MI_FLUSH_DW,
1130		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
1131	},
1132	[VCS1] = {
1133		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1134		.mi_flush_dw = VCS2_MI_FLUSH_DW,
1135		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
1136	},
1137	[VECS0] = {
1138		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1139		.mi_flush_dw = VECS_MI_FLUSH_DW,
1140		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
1141	},
1142};
1143
1144static int cmd_handler_pipe_control(struct parser_exec_state *s)
1145{
1146	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1147	unsigned long gma;
1148	bool index_mode = false;
1149	unsigned int post_sync;
1150	int ret = 0;
1151	u32 hws_pga, val;
1152
1153	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
1154
1155	/* LRI post sync */
1156	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
1157		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
1158	/* post sync */
1159	else if (post_sync) {
1160		if (post_sync == 2)
1161			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
1162		else if (post_sync == 3)
1163			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1164		else if (post_sync == 1) {
1165			/* check ggtt*/
1166			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1167				gma = cmd_val(s, 2) & GENMASK(31, 3);
1168				if (gmadr_bytes == 8)
1169					gma |= (cmd_gma_hi(s, 3)) << 32;
1170				/* Store Data Index */
1171				if (cmd_val(s, 1) & (1 << 21))
1172					index_mode = true;
1173				ret |= cmd_address_audit(s, gma, sizeof(u64),
1174						index_mode);
1175				if (ret)
1176					return ret;
1177				if (index_mode) {
1178					hws_pga = s->vgpu->hws_pga[s->engine->id];
1179					gma = hws_pga + gma;
1180					patch_value(s, cmd_ptr(s, 2), gma);
1181					val = cmd_val(s, 1) & (~(1 << 21));
1182					patch_value(s, cmd_ptr(s, 1), val);
1183				}
1184			}
1185		}
1186	}
1187
1188	if (ret)
1189		return ret;
1190
1191	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
1192		set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
1193			s->workload->pending_events);
1194	return 0;
1195}
1196
1197static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
1198{
1199	set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
1200		s->workload->pending_events);
1201	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1202	return 0;
1203}
1204
1205static int cmd_advance_default(struct parser_exec_state *s)
1206{
1207	return ip_gma_advance(s, cmd_length(s));
1208}
1209
1210static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
1211{
1212	int ret;
1213
1214	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1215		s->buf_type = BATCH_BUFFER_INSTRUCTION;
1216		ret = ip_gma_set(s, s->ret_ip_gma_bb);
1217		s->buf_addr_type = s->saved_buf_addr_type;
1218	} else {
1219		s->buf_type = RING_BUFFER_INSTRUCTION;
1220		s->buf_addr_type = GTT_BUFFER;
1221		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
1222			s->ret_ip_gma_ring -= s->ring_size;
1223		ret = ip_gma_set(s, s->ret_ip_gma_ring);
1224	}
1225	return ret;
1226}
1227
1228struct mi_display_flip_command_info {
1229	int pipe;
1230	int plane;
1231	int event;
1232	i915_reg_t stride_reg;
1233	i915_reg_t ctrl_reg;
1234	i915_reg_t surf_reg;
1235	u64 stride_val;
1236	u64 tile_val;
1237	u64 surf_val;
1238	bool async_flip;
1239};
1240
1241struct plane_code_mapping {
1242	int pipe;
1243	int plane;
1244	int event;
1245};
1246
1247static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
1248		struct mi_display_flip_command_info *info)
1249{
1250	struct drm_i915_private *dev_priv = s->engine->i915;
1251	struct plane_code_mapping gen8_plane_code[] = {
1252		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
1253		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
1254		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
1255		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
1256		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
1257		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
1258	};
1259	u32 dword0, dword1, dword2;
1260	u32 v;
1261
1262	dword0 = cmd_val(s, 0);
1263	dword1 = cmd_val(s, 1);
1264	dword2 = cmd_val(s, 2);
1265
1266	v = (dword0 & GENMASK(21, 19)) >> 19;
1267	if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
1268		return -EBADRQC;
1269
1270	info->pipe = gen8_plane_code[v].pipe;
1271	info->plane = gen8_plane_code[v].plane;
1272	info->event = gen8_plane_code[v].event;
1273	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1274	info->tile_val = (dword1 & 0x1);
1275	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1276	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1277
1278	if (info->plane == PLANE_A) {
1279		info->ctrl_reg = DSPCNTR(info->pipe);
1280		info->stride_reg = DSPSTRIDE(info->pipe);
1281		info->surf_reg = DSPSURF(info->pipe);
1282	} else if (info->plane == PLANE_B) {
1283		info->ctrl_reg = SPRCTL(info->pipe);
1284		info->stride_reg = SPRSTRIDE(info->pipe);
1285		info->surf_reg = SPRSURF(info->pipe);
1286	} else {
1287		drm_WARN_ON(&dev_priv->drm, 1);
1288		return -EBADRQC;
1289	}
1290	return 0;
1291}
1292
1293static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1294		struct mi_display_flip_command_info *info)
1295{
1296	struct drm_i915_private *dev_priv = s->engine->i915;
1297	struct intel_vgpu *vgpu = s->vgpu;
1298	u32 dword0 = cmd_val(s, 0);
1299	u32 dword1 = cmd_val(s, 1);
1300	u32 dword2 = cmd_val(s, 2);
1301	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
1302
1303	info->plane = PRIMARY_PLANE;
1304
1305	switch (plane) {
1306	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
1307		info->pipe = PIPE_A;
1308		info->event = PRIMARY_A_FLIP_DONE;
1309		break;
1310	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
1311		info->pipe = PIPE_B;
1312		info->event = PRIMARY_B_FLIP_DONE;
1313		break;
1314	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
1315		info->pipe = PIPE_C;
1316		info->event = PRIMARY_C_FLIP_DONE;
1317		break;
1318
1319	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
1320		info->pipe = PIPE_A;
1321		info->event = SPRITE_A_FLIP_DONE;
1322		info->plane = SPRITE_PLANE;
1323		break;
1324	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
1325		info->pipe = PIPE_B;
1326		info->event = SPRITE_B_FLIP_DONE;
1327		info->plane = SPRITE_PLANE;
1328		break;
1329	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
1330		info->pipe = PIPE_C;
1331		info->event = SPRITE_C_FLIP_DONE;
1332		info->plane = SPRITE_PLANE;
1333		break;
1334
1335	default:
1336		gvt_vgpu_err("unknown plane code %d\n", plane);
1337		return -EBADRQC;
1338	}
1339
1340	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1341	info->tile_val = (dword1 & GENMASK(2, 0));
1342	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1343	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1344
1345	info->ctrl_reg = DSPCNTR(info->pipe);
1346	info->stride_reg = DSPSTRIDE(info->pipe);
1347	info->surf_reg = DSPSURF(info->pipe);
1348
1349	return 0;
1350}
1351
1352static int gen8_check_mi_display_flip(struct parser_exec_state *s,
1353		struct mi_display_flip_command_info *info)
1354{
1355	u32 stride, tile;
1356
1357	if (!info->async_flip)
1358		return 0;
1359
1360	if (INTEL_GEN(s->engine->i915) >= 9) {
1361		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
1362		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
1363				GENMASK(12, 10)) >> 10;
1364	} else {
1365		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
1366				GENMASK(15, 6)) >> 6;
1367		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
1368	}
1369
1370	if (stride != info->stride_val)
1371		gvt_dbg_cmd("cannot change stride during async flip\n");
1372
1373	if (tile != info->tile_val)
1374		gvt_dbg_cmd("cannot change tile during async flip\n");
1375
1376	return 0;
1377}
1378
1379static int gen8_update_plane_mmio_from_mi_display_flip(
1380		struct parser_exec_state *s,
1381		struct mi_display_flip_command_info *info)
1382{
1383	struct drm_i915_private *dev_priv = s->engine->i915;
1384	struct intel_vgpu *vgpu = s->vgpu;
1385
1386	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
1387		      info->surf_val << 12);
1388	if (INTEL_GEN(dev_priv) >= 9) {
1389		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
1390			      info->stride_val);
1391		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
1392			      info->tile_val << 10);
1393	} else {
1394		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
1395			      info->stride_val << 6);
1396		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
1397			      info->tile_val << 10);
1398	}
1399
1400	if (info->plane == PLANE_PRIMARY)
1401		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
1402
1403	if (info->async_flip)
1404		intel_vgpu_trigger_virtual_event(vgpu, info->event);
1405	else
1406		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
1407
1408	return 0;
1409}
1410
1411static int decode_mi_display_flip(struct parser_exec_state *s,
1412		struct mi_display_flip_command_info *info)
1413{
1414	if (IS_BROADWELL(s->engine->i915))
1415		return gen8_decode_mi_display_flip(s, info);
1416	if (INTEL_GEN(s->engine->i915) >= 9)
1417		return skl_decode_mi_display_flip(s, info);
1418
1419	return -ENODEV;
1420}
1421
1422static int check_mi_display_flip(struct parser_exec_state *s,
1423		struct mi_display_flip_command_info *info)
1424{
1425	return gen8_check_mi_display_flip(s, info);
1426}
1427
1428static int update_plane_mmio_from_mi_display_flip(
1429		struct parser_exec_state *s,
1430		struct mi_display_flip_command_info *info)
1431{
1432	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
1433}
1434
1435static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1436{
1437	struct mi_display_flip_command_info info;
1438	struct intel_vgpu *vgpu = s->vgpu;
1439	int ret;
1440	int i;
1441	int len = cmd_length(s);
1442	u32 valid_len = CMD_LEN(1);
1443
1444	/* Flip Type == Stereo 3D Flip */
1445	if (DWORD_FIELD(2, 1, 0) == 2)
1446		valid_len++;
1447	ret = gvt_check_valid_cmd_length(cmd_length(s),
1448			valid_len);
1449	if (ret)
1450		return ret;
1451
1452	ret = decode_mi_display_flip(s, &info);
1453	if (ret) {
1454		gvt_vgpu_err("fail to decode MI display flip command\n");
1455		return ret;
1456	}
1457
1458	ret = check_mi_display_flip(s, &info);
1459	if (ret) {
1460		gvt_vgpu_err("invalid MI display flip command\n");
1461		return ret;
1462	}
1463
1464	ret = update_plane_mmio_from_mi_display_flip(s, &info);
1465	if (ret) {
1466		gvt_vgpu_err("fail to update plane mmio\n");
1467		return ret;
1468	}
1469
1470	for (i = 0; i < len; i++)
1471		patch_value(s, cmd_ptr(s, i), MI_NOOP);
1472	return 0;
1473}
1474
1475static bool is_wait_for_flip_pending(u32 cmd)
1476{
1477	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
1478			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
1479			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
1480			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
1481			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
1482			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
1483}
1484
1485static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
1486{
1487	u32 cmd = cmd_val(s, 0);
1488
1489	if (!is_wait_for_flip_pending(cmd))
1490		return 0;
1491
1492	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1493	return 0;
1494}
1495
1496static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
1497{
1498	unsigned long addr;
1499	unsigned long gma_high, gma_low;
1500	struct intel_vgpu *vgpu = s->vgpu;
1501	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1502
1503	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
1504		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
1505		return INTEL_GVT_INVALID_ADDR;
1506	}
1507
1508	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
1509	if (gmadr_bytes == 4) {
1510		addr = gma_low;
1511	} else {
1512		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
1513		addr = (((unsigned long)gma_high) << 32) | gma_low;
1514	}
1515	return addr;
1516}
1517
1518static inline int cmd_address_audit(struct parser_exec_state *s,
1519		unsigned long guest_gma, int op_size, bool index_mode)
1520{
1521	struct intel_vgpu *vgpu = s->vgpu;
1522	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
1523	int i;
1524	int ret;
1525
1526	if (op_size > max_surface_size) {
1527		gvt_vgpu_err("command address audit fail name %s\n",
1528			s->info->name);
1529		return -EFAULT;
1530	}
1531
1532	if (index_mode)	{
1533		if (guest_gma >= I915_GTT_PAGE_SIZE) {
1534			ret = -EFAULT;
1535			goto err;
1536		}
1537	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
1538		ret = -EFAULT;
1539		goto err;
1540	}
1541
1542	return 0;
1543
1544err:
1545	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1546			s->info->name, guest_gma, op_size);
1547
1548	pr_err("cmd dump: ");
1549	for (i = 0; i < cmd_length(s); i++) {
1550		if (!(i % 4))
1551			pr_err("\n%08x ", cmd_val(s, i));
1552		else
1553			pr_err("%08x ", cmd_val(s, i));
1554	}
1555	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1556			vgpu->id,
1557			vgpu_aperture_gmadr_base(vgpu),
1558			vgpu_aperture_gmadr_end(vgpu),
1559			vgpu_hidden_gmadr_base(vgpu),
1560			vgpu_hidden_gmadr_end(vgpu));
1561	return ret;
1562}
1563
1564static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1565{
1566	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1567	int op_size = (cmd_length(s) - 3) * sizeof(u32);
1568	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
1569	unsigned long gma, gma_low, gma_high;
1570	u32 valid_len = CMD_LEN(2);
1571	int ret = 0;
1572
1573	/* check ppggt */
1574	if (!(cmd_val(s, 0) & (1 << 22)))
1575		return 0;
1576
1577	/* check if QWORD */
1578	if (DWORD_FIELD(0, 21, 21))
1579		valid_len++;
1580	ret = gvt_check_valid_cmd_length(cmd_length(s),
1581			valid_len);
1582	if (ret)
1583		return ret;
1584
1585	gma = cmd_val(s, 2) & GENMASK(31, 2);
1586
1587	if (gmadr_bytes == 8) {
1588		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
1589		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1590		gma = (gma_high << 32) | gma_low;
1591		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
1592	}
1593	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
1594	return ret;
1595}
1596
1597static inline int unexpected_cmd(struct parser_exec_state *s)
1598{
1599	struct intel_vgpu *vgpu = s->vgpu;
1600
1601	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1602
1603	return -EBADRQC;
1604}
1605
1606static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1607{
1608	return unexpected_cmd(s);
1609}
1610
1611static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1612{
1613	return unexpected_cmd(s);
1614}
1615
1616static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1617{
1618	return unexpected_cmd(s);
1619}
1620
1621static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1622{
1623	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1624	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1625			sizeof(u32);
1626	unsigned long gma, gma_high;
1627	u32 valid_len = CMD_LEN(1);
1628	int ret = 0;
1629
1630	if (!(cmd_val(s, 0) & (1 << 22)))
1631		return ret;
1632
1633	/* check inline data */
1634	if (cmd_val(s, 0) & BIT(18))
1635		valid_len = CMD_LEN(9);
1636	ret = gvt_check_valid_cmd_length(cmd_length(s),
1637			valid_len);
1638	if (ret)
1639		return ret;
1640
1641	gma = cmd_val(s, 1) & GENMASK(31, 2);
1642	if (gmadr_bytes == 8) {
1643		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1644		gma = (gma_high << 32) | gma;
1645	}
1646	ret = cmd_address_audit(s, gma, op_size, false);
1647	return ret;
1648}
1649
1650static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1651{
1652	return unexpected_cmd(s);
1653}
1654
1655static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1656{
1657	return unexpected_cmd(s);
1658}
1659
1660static int cmd_handler_mi_conditional_batch_buffer_end(
1661		struct parser_exec_state *s)
1662{
1663	return unexpected_cmd(s);
1664}
1665
1666static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1667{
1668	return unexpected_cmd(s);
1669}
1670
1671static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1672{
1673	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1674	unsigned long gma;
1675	bool index_mode = false;
1676	int ret = 0;
1677	u32 hws_pga, val;
1678	u32 valid_len = CMD_LEN(2);
1679
1680	ret = gvt_check_valid_cmd_length(cmd_length(s),
1681			valid_len);
1682	if (ret) {
1683		/* Check again for Qword */
1684		ret = gvt_check_valid_cmd_length(cmd_length(s),
1685			++valid_len);
1686		return ret;
1687	}
1688
1689	/* Check post-sync and ppgtt bit */
1690	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1691		gma = cmd_val(s, 1) & GENMASK(31, 3);
1692		if (gmadr_bytes == 8)
1693			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1694		/* Store Data Index */
1695		if (cmd_val(s, 0) & (1 << 21))
1696			index_mode = true;
1697		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1698		if (ret)
1699			return ret;
1700		if (index_mode) {
1701			hws_pga = s->vgpu->hws_pga[s->engine->id];
1702			gma = hws_pga + gma;
1703			patch_value(s, cmd_ptr(s, 1), gma);
1704			val = cmd_val(s, 0) & (~(1 << 21));
1705			patch_value(s, cmd_ptr(s, 0), val);
1706		}
1707	}
1708	/* Check notify bit */
1709	if ((cmd_val(s, 0) & (1 << 8)))
1710		set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
1711			s->workload->pending_events);
1712	return ret;
1713}
1714
1715static void addr_type_update_snb(struct parser_exec_state *s)
1716{
1717	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1718			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1719		s->buf_addr_type = PPGTT_BUFFER;
1720	}
1721}
1722
1723
1724static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1725		unsigned long gma, unsigned long end_gma, void *va)
1726{
1727	unsigned long copy_len, offset;
1728	unsigned long len = 0;
1729	unsigned long gpa;
1730
1731	while (gma != end_gma) {
1732		gpa = intel_vgpu_gma_to_gpa(mm, gma);
1733		if (gpa == INTEL_GVT_INVALID_ADDR) {
1734			gvt_vgpu_err("invalid gma address: %lx\n", gma);
1735			return -EFAULT;
1736		}
1737
1738		offset = gma & (I915_GTT_PAGE_SIZE - 1);
1739
1740		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
1741			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
1742
1743		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1744
1745		len += copy_len;
1746		gma += copy_len;
1747	}
1748	return len;
1749}
1750
1751
1752/*
1753 * Check whether a batch buffer needs to be scanned. Currently
1754 * the only criteria is based on privilege.
1755 */
1756static int batch_buffer_needs_scan(struct parser_exec_state *s)
1757{
1758	/* Decide privilege based on address space */
1759	if (cmd_val(s, 0) & BIT(8) &&
1760	    !(s->vgpu->scan_nonprivbb & s->engine->mask))
1761		return 0;
1762
1763	return 1;
1764}
1765
1766static const char *repr_addr_type(unsigned int type)
1767{
1768	return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
1769}
1770
1771static int find_bb_size(struct parser_exec_state *s,
1772			unsigned long *bb_size,
1773			unsigned long *bb_end_cmd_offset)
1774{
1775	unsigned long gma = 0;
1776	const struct cmd_info *info;
1777	u32 cmd_len = 0;
1778	bool bb_end = false;
1779	struct intel_vgpu *vgpu = s->vgpu;
1780	u32 cmd;
1781	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1782		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1783
1784	*bb_size = 0;
1785	*bb_end_cmd_offset = 0;
1786
1787	/* get the start gm address of the batch buffer */
1788	gma = get_gma_bb_from_cmd(s, 1);
1789	if (gma == INTEL_GVT_INVALID_ADDR)
1790		return -EFAULT;
1791
1792	cmd = cmd_val(s, 0);
1793	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1794	if (info == NULL) {
1795		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1796			     cmd, get_opcode(cmd, s->engine),
1797			     repr_addr_type(s->buf_addr_type),
1798			     s->engine->name, s->workload);
1799		return -EBADRQC;
1800	}
1801	do {
1802		if (copy_gma_to_hva(s->vgpu, mm,
1803				    gma, gma + 4, &cmd) < 0)
1804			return -EFAULT;
1805		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1806		if (info == NULL) {
1807			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1808				     cmd, get_opcode(cmd, s->engine),
1809				     repr_addr_type(s->buf_addr_type),
1810				     s->engine->name, s->workload);
1811			return -EBADRQC;
1812		}
1813
1814		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1815			bb_end = true;
1816		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1817			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
1818				/* chained batch buffer */
1819				bb_end = true;
1820		}
1821
1822		if (bb_end)
1823			*bb_end_cmd_offset = *bb_size;
1824
1825		cmd_len = get_cmd_length(info, cmd) << 2;
1826		*bb_size += cmd_len;
1827		gma += cmd_len;
1828	} while (!bb_end);
1829
1830	return 0;
1831}
1832
1833static int audit_bb_end(struct parser_exec_state *s, void *va)
1834{
1835	struct intel_vgpu *vgpu = s->vgpu;
1836	u32 cmd = *(u32 *)va;
1837	const struct cmd_info *info;
1838
1839	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1840	if (info == NULL) {
1841		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1842			     cmd, get_opcode(cmd, s->engine),
1843			     repr_addr_type(s->buf_addr_type),
1844			     s->engine->name, s->workload);
1845		return -EBADRQC;
1846	}
1847
1848	if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
1849	    ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
1850	     (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
1851		return 0;
1852
1853	return -EBADRQC;
1854}
1855
1856static int perform_bb_shadow(struct parser_exec_state *s)
1857{
1858	struct intel_vgpu *vgpu = s->vgpu;
1859	struct intel_vgpu_shadow_bb *bb;
1860	unsigned long gma = 0;
1861	unsigned long bb_size;
1862	unsigned long bb_end_cmd_offset;
1863	int ret = 0;
1864	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1865		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1866	unsigned long start_offset = 0;
1867
1868	/* get the start gm address of the batch buffer */
1869	gma = get_gma_bb_from_cmd(s, 1);
1870	if (gma == INTEL_GVT_INVALID_ADDR)
1871		return -EFAULT;
1872
1873	ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
1874	if (ret)
1875		return ret;
1876
1877	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
1878	if (!bb)
1879		return -ENOMEM;
1880
1881	bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
1882
1883	/* the start_offset stores the batch buffer's start gma's
1884	 * offset relative to page boundary. so for non-privileged batch
1885	 * buffer, the shadowed gem object holds exactly the same page
1886	 * layout as original gem object. This is for the convience of
1887	 * replacing the whole non-privilged batch buffer page to this
1888	 * shadowed one in PPGTT at the same gma address. (this replacing
1889	 * action is not implemented yet now, but may be necessary in
1890	 * future).
1891	 * for prileged batch buffer, we just change start gma address to
1892	 * that of shadowed page.
1893	 */
1894	if (bb->ppgtt)
1895		start_offset = gma & ~I915_GTT_PAGE_MASK;
1896
1897	bb->obj = i915_gem_object_create_shmem(s->engine->i915,
1898					       round_up(bb_size + start_offset,
1899							PAGE_SIZE));
1900	if (IS_ERR(bb->obj)) {
1901		ret = PTR_ERR(bb->obj);
1902		goto err_free_bb;
1903	}
1904
1905	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
1906	if (IS_ERR(bb->va)) {
1907		ret = PTR_ERR(bb->va);
1908		goto err_free_obj;
1909	}
1910
1911	ret = copy_gma_to_hva(s->vgpu, mm,
1912			      gma, gma + bb_size,
1913			      bb->va + start_offset);
1914	if (ret < 0) {
1915		gvt_vgpu_err("fail to copy guest ring buffer\n");
1916		ret = -EFAULT;
1917		goto err_unmap;
1918	}
1919
1920	ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
1921	if (ret)
1922		goto err_unmap;
1923
1924	i915_gem_object_unlock(bb->obj);
1925	INIT_LIST_HEAD(&bb->list);
1926	list_add(&bb->list, &s->workload->shadow_bb);
1927
1928	bb->bb_start_cmd_va = s->ip_va;
1929
1930	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
1931		bb->bb_offset = s->ip_va - s->rb_va;
1932	else
1933		bb->bb_offset = 0;
1934
1935	/*
1936	 * ip_va saves the virtual address of the shadow batch buffer, while
1937	 * ip_gma saves the graphics address of the original batch buffer.
1938	 * As the shadow batch buffer is just a copy from the originial one,
1939	 * it should be right to use shadow batch buffer'va and original batch
1940	 * buffer's gma in pair. After all, we don't want to pin the shadow
1941	 * buffer here (too early).
1942	 */
1943	s->ip_va = bb->va + start_offset;
1944	s->ip_gma = gma;
1945	return 0;
1946err_unmap:
1947	i915_gem_object_unpin_map(bb->obj);
1948err_free_obj:
1949	i915_gem_object_put(bb->obj);
1950err_free_bb:
1951	kfree(bb);
1952	return ret;
1953}
1954
1955static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1956{
1957	bool second_level;
1958	int ret = 0;
1959	struct intel_vgpu *vgpu = s->vgpu;
1960
1961	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1962		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1963		return -EFAULT;
1964	}
1965
1966	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1967	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1968		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1969		return -EFAULT;
1970	}
1971
1972	s->saved_buf_addr_type = s->buf_addr_type;
1973	addr_type_update_snb(s);
1974	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1975		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1976		s->buf_type = BATCH_BUFFER_INSTRUCTION;
1977	} else if (second_level) {
1978		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1979		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1980		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1981	}
1982
1983	if (batch_buffer_needs_scan(s)) {
1984		ret = perform_bb_shadow(s);
1985		if (ret < 0)
1986			gvt_vgpu_err("invalid shadow batch buffer\n");
1987	} else {
1988		/* emulate a batch buffer end to do return right */
1989		ret = cmd_handler_mi_batch_buffer_end(s);
1990		if (ret < 0)
1991			return ret;
1992	}
1993	return ret;
1994}
1995
1996static int mi_noop_index;
1997
1998static const struct cmd_info cmd_info[] = {
1999	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2000
2001	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
2002		0, 1, NULL},
2003
2004	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
2005		0, 1, cmd_handler_mi_user_interrupt},
2006
2007	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
2008		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
2009
2010	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2011
2012	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2013		NULL},
2014
2015	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2016		NULL},
2017
2018	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2019		NULL},
2020
2021	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2022		NULL},
2023
2024	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
2025		D_ALL, 0, 1, NULL},
2026
2027	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
2028		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2029		cmd_handler_mi_batch_buffer_end},
2030
2031	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
2032		0, 1, NULL},
2033
2034	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2035		NULL},
2036
2037	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
2038		D_ALL, 0, 1, NULL},
2039
2040	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2041		NULL},
2042
2043	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2044		NULL},
2045
2046	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
2047		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
2048
2049	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
2050		R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
2051
2052	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
2053
2054	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
2055		D_ALL, 0, 8, NULL, CMD_LEN(0)},
2056
2057	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
2058		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
2059		NULL, CMD_LEN(0)},
2060
2061	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
2062		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
2063		8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
2064
2065	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
2066		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
2067
2068	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
2069		0, 8, cmd_handler_mi_store_data_index},
2070
2071	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
2072		D_ALL, 0, 8, cmd_handler_lri},
2073
2074	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
2075		cmd_handler_mi_update_gtt},
2076
2077	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
2078		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2079		cmd_handler_srm, CMD_LEN(2)},
2080
2081	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
2082		cmd_handler_mi_flush_dw},
2083
2084	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
2085		10, cmd_handler_mi_clflush},
2086
2087	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
2088		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
2089		cmd_handler_mi_report_perf_count, CMD_LEN(2)},
2090
2091	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
2092		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2093		cmd_handler_lrm, CMD_LEN(2)},
2094
2095	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
2096		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
2097		cmd_handler_lrr, CMD_LEN(1)},
2098
2099	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
2100		F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
2101		8, NULL, CMD_LEN(2)},
2102
2103	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
2104		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
2105
2106	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
2107		ADDR_FIX_1(2), 8, NULL},
2108
2109	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
2110		ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
2111
2112	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
2113		8, cmd_handler_mi_op_2f},
2114
2115	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
2116		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
2117		cmd_handler_mi_batch_buffer_start},
2118
2119	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
2120		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2121		cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
2122
2123	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
2124		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
2125
2126	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2127		ADDR_FIX_2(4, 7), 8, NULL},
2128
2129	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2130		0, 8, NULL},
2131
2132	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
2133		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2134
2135	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2136
2137	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
2138		0, 8, NULL},
2139
2140	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2141		ADDR_FIX_1(3), 8, NULL},
2142
2143	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
2144		D_ALL, 0, 8, NULL},
2145
2146	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
2147		ADDR_FIX_1(4), 8, NULL},
2148
2149	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2150		ADDR_FIX_2(4, 5), 8, NULL},
2151
2152	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2153		ADDR_FIX_1(4), 8, NULL},
2154
2155	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
2156		ADDR_FIX_2(4, 7), 8, NULL},
2157
2158	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
2159		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2160
2161	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2162
2163	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
2164		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
2165
2166	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
2167		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2168
2169	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
2170		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
2171		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2172
2173	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
2174		D_ALL, ADDR_FIX_1(4), 8, NULL},
2175
2176	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
2177		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2178
2179	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
2180		D_ALL, ADDR_FIX_1(4), 8, NULL},
2181
2182	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
2183		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2184
2185	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
2186		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2187
2188	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
2189		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
2190		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2191
2192	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
2193		ADDR_FIX_2(4, 5), 8, NULL},
2194
2195	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
2196		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2197
2198	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
2199		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2200		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2201
2202	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
2203		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2204		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2205
2206	{"3DSTATE_BLEND_STATE_POINTERS",
2207		OP_3DSTATE_BLEND_STATE_POINTERS,
2208		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2209
2210	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
2211		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
2212		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2213
2214	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
2215		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
2216		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2217
2218	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
2219		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
2220		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2221
2222	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
2223		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
2224		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2225
2226	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
2227		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
2228		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2229
2230	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
2231		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
2232		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2233
2234	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
2235		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2236		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2237
2238	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2239		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
2240		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2241
2242	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2243		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
2244		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2245
2246	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2247		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
2248		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2249
2250	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2251		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
2252		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2253
2254	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
2255		0, 8, NULL},
2256
2257	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
2258		0, 8, NULL},
2259
2260	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2261		0, 8, NULL},
2262
2263	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2264		0, 8, NULL},
2265
2266	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2267		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2268
2269	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2270		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2271
2272	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2273		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2274
2275	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2276		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2277
2278	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2279		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2280
2281	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2282		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2283
2284	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2285		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2286
2287	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2288		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2289
2290	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2291		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2292
2293	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2294		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2295
2296	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2297		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2298
2299	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2300		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2301
2302	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2303		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2304
2305	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2306		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2307
2308	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2309		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2310
2311	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2312		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2313
2314	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2315		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2316
2317	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2318		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2319
2320	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2321		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2322
2323	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2324		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2325
2326	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2327		D_BDW_PLUS, 0, 8, NULL},
2328
2329	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2330		NULL},
2331
2332	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2333		D_BDW_PLUS, 0, 8, NULL},
2334
2335	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2336		D_BDW_PLUS, 0, 8, NULL},
2337
2338	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2339		8, NULL},
2340
2341	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2342		R_RCS, D_BDW_PLUS, 0, 8, NULL},
2343
2344	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2345		8, NULL},
2346
2347	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2348		NULL},
2349
2350	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2351		NULL},
2352
2353	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2354		NULL},
2355
2356	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2357		D_BDW_PLUS, 0, 8, NULL},
2358
2359	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2360		R_RCS, D_ALL, 0, 8, NULL},
2361
2362	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2363		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2364
2365	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2366		R_RCS, D_ALL, 0, 1, NULL},
2367
2368	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2369
2370	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2371		R_RCS, D_ALL, 0, 8, NULL},
2372
2373	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2374		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2375
2376	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2377
2378	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2379
2380	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2381
2382	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2383		D_BDW_PLUS, 0, 8, NULL},
2384
2385	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2386		D_BDW_PLUS, 0, 8, NULL},
2387
2388	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2389		D_ALL, 0, 8, NULL},
2390
2391	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2392		D_BDW_PLUS, 0, 8, NULL},
2393
2394	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2395		D_BDW_PLUS, 0, 8, NULL},
2396
2397	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2398
2399	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2400
2401	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2402
2403	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2404		D_ALL, 0, 8, NULL},
2405
2406	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2407
2408	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2409
2410	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2411		R_RCS, D_ALL, 0, 8, NULL},
2412
2413	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2414		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2415
2416	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2417		0, 8, NULL},
2418
2419	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2420		D_ALL, ADDR_FIX_1(2), 8, NULL},
2421
2422	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2423		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2424
2425	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2426		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2427
2428	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2429		D_ALL, 0, 8, NULL},
2430
2431	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2432		D_ALL, 0, 8, NULL},
2433
2434	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2435		D_ALL, 0, 8, NULL},
2436
2437	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2438		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2439
2440	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2441		D_BDW_PLUS, 0, 8, NULL},
2442
2443	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2444		D_ALL, ADDR_FIX_1(2), 8, NULL},
2445
2446	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2447		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2448
2449	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2450		R_RCS, D_ALL, 0, 8, NULL},
2451
2452	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2453		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2454
2455	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2456		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2457
2458	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2459		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2460
2461	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2462		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2463
2464	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2465		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2466
2467	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2468		R_RCS, D_ALL, 0, 8, NULL},
2469
2470	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2471		D_ALL, 0, 9, NULL},
2472
2473	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2474		ADDR_FIX_2(2, 4), 8, NULL},
2475
2476	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2477		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2478		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2479
2480	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2481		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2482
2483	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2484		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2485		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2486
2487	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2488		D_BDW_PLUS, 0, 8, NULL},
2489
2490	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2491		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2492
2493	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2494
2495	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2496		1, NULL},
2497
2498	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2499		ADDR_FIX_1(1), 8, NULL},
2500
2501	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2502
2503	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2504		ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2505
2506	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2507		ADDR_FIX_1(1), 8, NULL},
2508
2509	{"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
2510		F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
2511
2512	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2513
2514	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2515
2516	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2517		0, 8, NULL},
2518
2519	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2520		D_SKL_PLUS, 0, 8, NULL},
2521
2522	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2523		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2524
2525	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2526		0, 16, NULL},
2527
2528	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2529		0, 16, NULL},
2530
2531	{"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
2532		0, 16, NULL},
2533
2534	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2535
2536	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2537		0, 16, NULL},
2538
2539	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2540		0, 16, NULL},
2541
2542	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2543		0, 16, NULL},
2544
2545	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2546		0, 8, NULL},
2547
2548	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2549		NULL},
2550
2551	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2552		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2553
2554	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2555		R_VCS, D_ALL, 0, 12, NULL},
2556
2557	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2558		R_VCS, D_ALL, 0, 12, NULL},
2559
2560	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2561		R_VCS, D_BDW_PLUS, 0, 12, NULL},
2562
2563	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2564		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2565
2566	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2567		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2568
2569	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2570
2571	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2572		R_VCS, D_ALL, 0, 12, NULL},
2573
2574	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2575		R_VCS, D_ALL, 0, 12, NULL},
2576
2577	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2578		R_VCS, D_ALL, 0, 12, NULL},
2579
2580	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2581		R_VCS, D_ALL, 0, 12, NULL},
2582
2583	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2584		R_VCS, D_ALL, 0, 12, NULL},
2585
2586	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2587		R_VCS, D_ALL, 0, 12, NULL},
2588
2589	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2590		R_VCS, D_ALL, 0, 6, NULL},
2591
2592	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2593		R_VCS, D_ALL, 0, 12, NULL},
2594
2595	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2596		R_VCS, D_ALL, 0, 12, NULL},
2597
2598	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2599		R_VCS, D_ALL, 0, 12, NULL},
2600
2601	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2602		R_VCS, D_ALL, 0, 12, NULL},
2603
2604	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2605		R_VCS, D_ALL, 0, 12, NULL},
2606
2607	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2608		R_VCS, D_ALL, 0, 12, NULL},
2609
2610	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2611		R_VCS, D_ALL, 0, 12, NULL},
2612	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2613		R_VCS, D_ALL, 0, 12, NULL},
2614
2615	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2616		R_VCS, D_ALL, 0, 12, NULL},
2617
2618	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2619		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2620
2621	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2622		R_VCS, D_ALL, 0, 12, NULL},
2623
2624	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2625		R_VCS, D_ALL, 0, 12, NULL},
2626
2627	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2628		R_VCS, D_ALL, 0, 12, NULL},
2629
2630	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2631		R_VCS, D_ALL, 0, 12, NULL},
2632
2633	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2634		R_VCS, D_ALL, 0, 12, NULL},
2635
2636	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2637		R_VCS, D_ALL, 0, 12, NULL},
2638
2639	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2640		R_VCS, D_ALL, 0, 12, NULL},
2641
2642	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2643		R_VCS, D_ALL, 0, 12, NULL},
2644
2645	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2646		R_VCS, D_ALL, 0, 12, NULL},
2647
2648	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2649		R_VCS, D_ALL, 0, 12, NULL},
2650
2651	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2652		R_VCS, D_ALL, 0, 12, NULL},
2653
2654	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2655		0, 16, NULL},
2656
2657	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2658
2659	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2660
2661	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2662		R_VCS, D_ALL, 0, 12, NULL},
2663
2664	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2665		R_VCS, D_ALL, 0, 12, NULL},
2666
2667	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2668		R_VCS, D_ALL, 0, 12, NULL},
2669
2670	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2671
2672	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2673		0, 12, NULL},
2674
2675	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2676		0, 12, NULL},
2677};
2678
2679static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2680{
2681	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2682}
2683
2684/* call the cmd handler, and advance ip */
2685static int cmd_parser_exec(struct parser_exec_state *s)
2686{
2687	struct intel_vgpu *vgpu = s->vgpu;
2688	const struct cmd_info *info;
2689	u32 cmd;
2690	int ret = 0;
2691
2692	cmd = cmd_val(s, 0);
2693
2694	/* fastpath for MI_NOOP */
2695	if (cmd == MI_NOOP)
2696		info = &cmd_info[mi_noop_index];
2697	else
2698		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
2699
2700	if (info == NULL) {
2701		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
2702			     cmd, get_opcode(cmd, s->engine),
2703			     repr_addr_type(s->buf_addr_type),
2704			     s->engine->name, s->workload);
2705		return -EBADRQC;
2706	}
2707
2708	s->info = info;
2709
2710	trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
2711			  cmd_length(s), s->buf_type, s->buf_addr_type,
2712			  s->workload, info->name);
2713
2714	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
2715		ret = gvt_check_valid_cmd_length(cmd_length(s),
2716						 info->valid_len);
2717		if (ret)
2718			return ret;
2719	}
2720
2721	if (info->handler) {
2722		ret = info->handler(s);
2723		if (ret < 0) {
2724			gvt_vgpu_err("%s handler error\n", info->name);
2725			return ret;
2726		}
2727	}
2728
2729	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2730		ret = cmd_advance_default(s);
2731		if (ret) {
2732			gvt_vgpu_err("%s IP advance error\n", info->name);
2733			return ret;
2734		}
2735	}
2736	return 0;
2737}
2738
2739static inline bool gma_out_of_range(unsigned long gma,
2740		unsigned long gma_head, unsigned int gma_tail)
2741{
2742	if (gma_tail >= gma_head)
2743		return (gma < gma_head) || (gma > gma_tail);
2744	else
2745		return (gma > gma_tail) && (gma < gma_head);
2746}
2747
2748/* Keep the consistent return type, e.g EBADRQC for unknown
2749 * cmd, EFAULT for invalid address, EPERM for nonpriv. later
2750 * works as the input of VM healthy status.
2751 */
2752static int command_scan(struct parser_exec_state *s,
2753		unsigned long rb_head, unsigned long rb_tail,
2754		unsigned long rb_start, unsigned long rb_len)
2755{
2756
2757	unsigned long gma_head, gma_tail, gma_bottom;
2758	int ret = 0;
2759	struct intel_vgpu *vgpu = s->vgpu;
2760
2761	gma_head = rb_start + rb_head;
2762	gma_tail = rb_start + rb_tail;
2763	gma_bottom = rb_start +  rb_len;
2764
2765	while (s->ip_gma != gma_tail) {
2766		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2767			if (!(s->ip_gma >= rb_start) ||
2768				!(s->ip_gma < gma_bottom)) {
2769				gvt_vgpu_err("ip_gma %lx out of ring scope."
2770					"(base:0x%lx, bottom: 0x%lx)\n",
2771					s->ip_gma, rb_start,
2772					gma_bottom);
2773				parser_exec_state_dump(s);
2774				return -EFAULT;
2775			}
2776			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2777				gvt_vgpu_err("ip_gma %lx out of range."
2778					"base 0x%lx head 0x%lx tail 0x%lx\n",
2779					s->ip_gma, rb_start,
2780					rb_head, rb_tail);
2781				parser_exec_state_dump(s);
2782				break;
2783			}
2784		}
2785		ret = cmd_parser_exec(s);
2786		if (ret) {
2787			gvt_vgpu_err("cmd parser error\n");
2788			parser_exec_state_dump(s);
2789			break;
2790		}
2791	}
2792
2793	return ret;
2794}
2795
2796static int scan_workload(struct intel_vgpu_workload *workload)
2797{
2798	unsigned long gma_head, gma_tail, gma_bottom;
2799	struct parser_exec_state s;
2800	int ret = 0;
2801
2802	/* ring base is page aligned */
2803	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
2804		return -EINVAL;
2805
2806	gma_head = workload->rb_start + workload->rb_head;
2807	gma_tail = workload->rb_start + workload->rb_tail;
2808	gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2809
2810	s.buf_type = RING_BUFFER_INSTRUCTION;
2811	s.buf_addr_type = GTT_BUFFER;
2812	s.vgpu = workload->vgpu;
2813	s.engine = workload->engine;
2814	s.ring_start = workload->rb_start;
2815	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2816	s.ring_head = gma_head;
2817	s.ring_tail = gma_tail;
2818	s.rb_va = workload->shadow_ring_buffer_va;
2819	s.workload = workload;
2820	s.is_ctx_wa = false;
2821
2822	if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
2823		return 0;
2824
2825	ret = ip_gma_set(&s, gma_head);
2826	if (ret)
2827		goto out;
2828
2829	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2830		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2831
2832out:
2833	return ret;
2834}
2835
2836static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2837{
2838
2839	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2840	struct parser_exec_state s;
2841	int ret = 0;
2842	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2843				struct intel_vgpu_workload,
2844				wa_ctx);
2845
2846	/* ring base is page aligned */
2847	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2848					I915_GTT_PAGE_SIZE)))
2849		return -EINVAL;
2850
2851	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
2852	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2853			PAGE_SIZE);
2854	gma_head = wa_ctx->indirect_ctx.guest_gma;
2855	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2856	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2857
2858	s.buf_type = RING_BUFFER_INSTRUCTION;
2859	s.buf_addr_type = GTT_BUFFER;
2860	s.vgpu = workload->vgpu;
2861	s.engine = workload->engine;
2862	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2863	s.ring_size = ring_size;
2864	s.ring_head = gma_head;
2865	s.ring_tail = gma_tail;
2866	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2867	s.workload = workload;
2868	s.is_ctx_wa = true;
2869
2870	ret = ip_gma_set(&s, gma_head);
2871	if (ret)
2872		goto out;
2873
2874	ret = command_scan(&s, 0, ring_tail,
2875		wa_ctx->indirect_ctx.guest_gma, ring_size);
2876out:
2877	return ret;
2878}
2879
2880static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2881{
2882	struct intel_vgpu *vgpu = workload->vgpu;
2883	struct intel_vgpu_submission *s = &vgpu->submission;
2884	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2885	void *shadow_ring_buffer_va;
2886	int ret;
2887
2888	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2889
2890	/* calculate workload ring buffer size */
2891	workload->rb_len = (workload->rb_tail + guest_rb_size -
2892			workload->rb_head) % guest_rb_size;
2893
2894	gma_head = workload->rb_start + workload->rb_head;
2895	gma_tail = workload->rb_start + workload->rb_tail;
2896	gma_top = workload->rb_start + guest_rb_size;
2897
2898	if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
2899		void *p;
2900
2901		/* realloc the new ring buffer if needed */
2902		p = krealloc(s->ring_scan_buffer[workload->engine->id],
2903			     workload->rb_len, GFP_KERNEL);
2904		if (!p) {
2905			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
2906			return -ENOMEM;
2907		}
2908		s->ring_scan_buffer[workload->engine->id] = p;
2909		s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
2910	}
2911
2912	shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
2913
2914	/* get shadow ring buffer va */
2915	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2916
2917	/* head > tail --> copy head <-> top */
2918	if (gma_head > gma_tail) {
2919		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2920				      gma_head, gma_top, shadow_ring_buffer_va);
2921		if (ret < 0) {
2922			gvt_vgpu_err("fail to copy guest ring buffer\n");
2923			return ret;
2924		}
2925		shadow_ring_buffer_va += ret;
2926		gma_head = workload->rb_start;
2927	}
2928
2929	/* copy head or start <-> tail */
2930	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2931				shadow_ring_buffer_va);
2932	if (ret < 0) {
2933		gvt_vgpu_err("fail to copy guest ring buffer\n");
2934		return ret;
2935	}
2936	return 0;
2937}
2938
2939int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2940{
2941	int ret;
2942	struct intel_vgpu *vgpu = workload->vgpu;
2943
2944	ret = shadow_workload_ring_buffer(workload);
2945	if (ret) {
2946		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2947		return ret;
2948	}
2949
2950	ret = scan_workload(workload);
2951	if (ret) {
2952		gvt_vgpu_err("scan workload error\n");
2953		return ret;
2954	}
2955	return 0;
2956}
2957
2958static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2959{
2960	int ctx_size = wa_ctx->indirect_ctx.size;
2961	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2962	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2963					struct intel_vgpu_workload,
2964					wa_ctx);
2965	struct intel_vgpu *vgpu = workload->vgpu;
2966	struct drm_i915_gem_object *obj;
2967	int ret = 0;
2968	void *map;
2969
2970	obj = i915_gem_object_create_shmem(workload->engine->i915,
2971					   roundup(ctx_size + CACHELINE_BYTES,
2972						   PAGE_SIZE));
2973	if (IS_ERR(obj))
2974		return PTR_ERR(obj);
2975
2976	/* get the va of the shadow batch buffer */
2977	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2978	if (IS_ERR(map)) {
2979		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2980		ret = PTR_ERR(map);
2981		goto put_obj;
2982	}
2983
2984	i915_gem_object_lock(obj, NULL);
2985	ret = i915_gem_object_set_to_cpu_domain(obj, false);
2986	i915_gem_object_unlock(obj);
2987	if (ret) {
2988		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2989		goto unmap_src;
2990	}
2991
2992	ret = copy_gma_to_hva(workload->vgpu,
2993				workload->vgpu->gtt.ggtt_mm,
2994				guest_gma, guest_gma + ctx_size,
2995				map);
2996	if (ret < 0) {
2997		gvt_vgpu_err("fail to copy guest indirect ctx\n");
2998		goto unmap_src;
2999	}
3000
3001	wa_ctx->indirect_ctx.obj = obj;
3002	wa_ctx->indirect_ctx.shadow_va = map;
3003	return 0;
3004
3005unmap_src:
3006	i915_gem_object_unpin_map(obj);
3007put_obj:
3008	i915_gem_object_put(obj);
3009	return ret;
3010}
3011
3012static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3013{
3014	u32 per_ctx_start[CACHELINE_DWORDS] = {0};
3015	unsigned char *bb_start_sva;
3016
3017	if (!wa_ctx->per_ctx.valid)
3018		return 0;
3019
3020	per_ctx_start[0] = 0x18800001;
3021	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
3022
3023	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
3024				wa_ctx->indirect_ctx.size;
3025
3026	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
3027
3028	return 0;
3029}
3030
3031int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3032{
3033	int ret;
3034	struct intel_vgpu_workload *workload = container_of(wa_ctx,
3035					struct intel_vgpu_workload,
3036					wa_ctx);
3037	struct intel_vgpu *vgpu = workload->vgpu;
3038
3039	if (wa_ctx->indirect_ctx.size == 0)
3040		return 0;
3041
3042	ret = shadow_indirect_ctx(wa_ctx);
3043	if (ret) {
3044		gvt_vgpu_err("fail to shadow indirect ctx\n");
3045		return ret;
3046	}
3047
3048	combine_wa_ctx(wa_ctx);
3049
3050	ret = scan_wa_ctx(wa_ctx);
3051	if (ret) {
3052		gvt_vgpu_err("scan wa ctx error\n");
3053		return ret;
3054	}
3055
3056	return 0;
3057}
3058
3059static int init_cmd_table(struct intel_gvt *gvt)
3060{
3061	unsigned int gen_type = intel_gvt_get_device_type(gvt);
3062	int i;
3063
3064	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
3065		struct cmd_entry *e;
3066
3067		if (!(cmd_info[i].devices & gen_type))
3068			continue;
3069
3070		e = kzalloc(sizeof(*e), GFP_KERNEL);
3071		if (!e)
3072			return -ENOMEM;
3073
3074		e->info = &cmd_info[i];
3075		if (cmd_info[i].opcode == OP_MI_NOOP)
3076			mi_noop_index = i;
3077
3078		INIT_HLIST_NODE(&e->hlist);
3079		add_cmd_entry(gvt, e);
3080		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
3081			    e->info->name, e->info->opcode, e->info->flag,
3082			    e->info->devices, e->info->rings);
3083	}
3084
3085	return 0;
3086}
3087
3088static void clean_cmd_table(struct intel_gvt *gvt)
3089{
3090	struct hlist_node *tmp;
3091	struct cmd_entry *e;
3092	int i;
3093
3094	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
3095		kfree(e);
3096
3097	hash_init(gvt->cmd_table);
3098}
3099
3100void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
3101{
3102	clean_cmd_table(gvt);
3103}
3104
3105int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
3106{
3107	int ret;
3108
3109	ret = init_cmd_table(gvt);
3110	if (ret) {
3111		intel_gvt_clean_cmd_parser(gvt);
3112		return ret;
3113	}
3114	return 0;
3115}
3116