/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */

#include "i915_drv.h"
/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * hardware.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser generally rejects such
 * commands, though it may allow some from the drm master process.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access (for both
 * normal and drm master processes).
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each ring (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each ring maintains tables of commands and registers which the parser uses in
 * scanning batch buffers submitted to that ring.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-ring command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, master-only, register whitelist. The parser
 * implements a number of checks, including the privileged memory checks, via a
 * general bitmasking mechanism.
 */
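
/*
 * As a rough illustration of those categories (a made-up batch fragment,
 * not taken from any real workload), the parser would handle a dword
 * stream like this as follows:
 *
 *      MI_LOAD_REGISTER_IMM(1), <reg>, <value>  - allowed only if <reg> is
 *                                                 on the ring's whitelist
 *      MI_STORE_DWORD_IMM with MI_GLOBAL_GTT    - rejected: privileged memory
 *      MI_WAIT_FOR_EVENT                        - allowed for drm master only
 *      MI_BATCH_BUFFER_END                      - ends the scan
 */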
#define STD_MI_OPCODE_MASK  0xFF800000
#define STD_3D_OPCODE_MASK  0xFFFF0000
#define STD_2D_OPCODE_MASK  0xFFC00000
#define STD_MFX_OPCODE_MASK 0xFFFF0000

#define CMD(op, opm, f, lm, fl, ...)                            \
        {                                                       \
                .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
                .cmd = { (op), (opm) },                         \
                .length = { (lm) },                             \
                __VA_ARGS__                                     \
        }

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_MASK
#define S3D STD_3D_OPCODE_MASK
#define S2D STD_2D_OPCODE_MASK
#define SMFX STD_MFX_OPCODE_MASK
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
#define M CMD_DESC_MASTER
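
/*
 * As an example of how these macros compose, the common_cmds entry below
 *
 *      CMD(  MI_NOOP,  SMI,  F,  1,  S  )
 *
 * expands to a descriptor with .flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 * .cmd = { MI_NOOP, STD_MI_OPCODE_MASK } and a fixed length of 1 dword.
 */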
/*            Command                          Mask   Fixed Len   Action
              ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor common_cmds[] = {
        CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
        CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
        CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
        CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
        CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
        CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
        CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
              .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
        CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
              .reg = { .offset = 1, .mask = 0x007FFFFC },
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        /*
         * MI_BATCH_BUFFER_START requires some special handling. It's not
         * really a 'skip' action but it doesn't seem like it's worth adding
         * a new action. See i915_parse_cmds().
         */
        CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};
static const struct drm_i915_cmd_descriptor render_cmds[] = {
        CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
        CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
        CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
        CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 1,
                        .mask = MI_REPORT_PERF_COUNT_GGTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
        CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
        CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
              .bits = {{
                        .offset = 2,
                        .mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
                        .expected = 0,
              }},                                                      ),
        CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
        CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
        CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 1,
                        .mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_STORE_DATA_INDEX),
                        .expected = 0,
                        .condition_offset = 1,
                        .condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
              }},                                                      ),
};
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
        CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
        CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
        CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
        CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
        CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
        CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
        CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
        CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
        CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};
static const struct drm_i915_cmd_descriptor video_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        /*
         * MFX_WAIT doesn't fit the way we handle length for most commands.
         * It has a length field but it uses a non-standard length bias.
         * It is always 1 dword though, so just treat it as fixed length.
         */
        CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};
static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
        CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
        CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
};
static const struct drm_i915_cmd_descriptor blt_cmds[] = {
        CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
        CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_GLOBAL_GTT,
                        .expected = 0,
              }},                                                      ),
        CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
        CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
              .bits = {{
                        .offset = 0,
                        .mask = MI_FLUSH_DW_NOTIFY,
                        .expected = 0,
              },
              {
                        .offset = 1,
                        .mask = MI_FLUSH_DW_USE_GTT,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              },
              {
                        .offset = 0,
                        .mask = MI_FLUSH_DW_STORE_INDEX,
                        .expected = 0,
                        .condition_offset = 0,
                        .condition_mask = MI_FLUSH_DW_OP_MASK,
              }},                                                      ),
        CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
        CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
        CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
        CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

#undef CMD

#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B
#undef M
static const struct drm_i915_cmd_table gen7_render_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { render_cmds, ARRAY_SIZE(render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { render_cmds, ARRAY_SIZE(render_cmds) },
        { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { video_cmds, ARRAY_SIZE(video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { blt_cmds, ARRAY_SIZE(blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
        { common_cmds, ARRAY_SIZE(common_cmds) },
        { blt_cmds, ARRAY_SIZE(blt_cmds) },
        { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};
/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr. If
 * mask is non-zero, the argument of an immediate register write will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
        u32 addr;
        u32 mask;
        u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(address, ...)                             \
        { .addr = address, __VA_ARGS__ }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(addr)                                     \
        REG32(addr), REG32(addr + sizeof(u32))
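
/*
 * For example, REG64(MI_PREDICATE_SRC0) in the table below expands to
 *
 *      REG32(MI_PREDICATE_SRC0), REG32(MI_PREDICATE_SRC0 + sizeof(u32))
 *
 * i.e. one whitelist entry per 32-bit half of the register.
 */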
static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
        REG64(GPGPU_THREADS_DISPATCHED),
        REG64(HS_INVOCATION_COUNT),
        REG64(DS_INVOCATION_COUNT),
        REG64(IA_VERTICES_COUNT),
        REG64(IA_PRIMITIVES_COUNT),
        REG64(VS_INVOCATION_COUNT),
        REG64(GS_INVOCATION_COUNT),
        REG64(GS_PRIMITIVES_COUNT),
        REG64(CL_INVOCATION_COUNT),
        REG64(CL_PRIMITIVES_COUNT),
        REG64(PS_INVOCATION_COUNT),
        REG64(PS_DEPTH_COUNT),
        REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
        REG64(MI_PREDICATE_SRC0),
        REG64(MI_PREDICATE_SRC1),
        REG32(GEN7_3DPRIM_END_OFFSET),
        REG32(GEN7_3DPRIM_START_VERTEX),
        REG32(GEN7_3DPRIM_VERTEX_COUNT),
        REG32(GEN7_3DPRIM_INSTANCE_COUNT),
        REG32(GEN7_3DPRIM_START_INSTANCE),
        REG32(GEN7_3DPRIM_BASE_VERTEX),
        REG32(GEN7_GPGPU_DISPATCHDIMX),
        REG32(GEN7_GPGPU_DISPATCHDIMY),
        REG32(GEN7_GPGPU_DISPATCHDIMZ),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
        REG64(GEN7_SO_NUM_PRIMS_WRITTEN(3)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(0)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
        REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
        REG32(GEN7_SO_WRITE_OFFSET(0)),
        REG32(GEN7_SO_WRITE_OFFSET(1)),
        REG32(GEN7_SO_WRITE_OFFSET(2)),
        REG32(GEN7_SO_WRITE_OFFSET(3)),
        REG32(GEN7_L3SQCREG1),
        REG32(GEN7_L3CNTLREG2),
        REG32(GEN7_L3CNTLREG3),
        REG32(HSW_SCRATCH1,
              .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
              .value = 0),
        REG32(HSW_ROW_CHICKEN3,
              .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
                        HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
              .value = 0),
};
static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
        REG32(BCS_SWCTRL),
};

static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
        REG32(FORCEWAKE_MT),
        REG32(DERRMR),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
        REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
};

static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
        REG32(FORCEWAKE_MT),
        REG32(DERRMR),
};

#undef REG64
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
        u32 subclient =
                (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_RC_CLIENT) {
                if (subclient == INSTR_MEDIA_SUBCLIENT)
                        return 0xFFFF;
                else
                        return 0xFF;
        }

        DRM_DEBUG_DRIVER("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
        return 0;
}
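
/*
 * As a sketch of the decoding above: MI_NOOP has client bits 31:29 == 0
 * (INSTR_MI_CLIENT), so the returned mask 0x3F selects the MI length
 * field in bits 5:0, while a render (INSTR_RC_CLIENT) command carries
 * its length in bits 7:0, hence the 0xFF mask.
 */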
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
        u32 subclient =
                (cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
        u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_RC_CLIENT) {
                if (subclient == INSTR_MEDIA_SUBCLIENT) {
                        if (op == 6)
                                return 0xFFFF;
                        else
                                return 0xFFF;
                } else
                        return 0xFF;
        }

        DRM_DEBUG_DRIVER("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
        return 0;
}
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
        u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

        if (client == INSTR_MI_CLIENT)
                return 0x3F;
        else if (client == INSTR_BC_CLIENT)
                return 0xFF;

        DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
        return 0;
}
static bool validate_cmds_sorted(struct intel_engine_cs *ring,
                                 const struct drm_i915_cmd_table *cmd_tables,
                                 int cmd_table_count)
{
        int i;
        bool ret = true;

        if (!cmd_tables || cmd_table_count == 0)
                return true;

        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];
                u32 previous = 0;
                int j;

                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
                                &table->table[j];
                        u32 curr = desc->cmd.value & desc->cmd.mask;

                        if (curr < previous) {
                                DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
                                          ring->id, i, j, curr, previous);
                                ret = false;
                        }

                        previous = curr;
                }
        }

        return ret;
}
static bool check_sorted(int ring_id,
                         const struct drm_i915_reg_descriptor *reg_table,
                         int reg_count)
{
        int i;
        u32 previous = 0;
        bool ret = true;

        for (i = 0; i < reg_count; i++) {
                u32 curr = reg_table[i].addr;

                if (curr < previous) {
                        DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
                                  ring_id, i, curr, previous);
                        ret = false;
                }

                previous = curr;
        }

        return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *ring)
{
        return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
                check_sorted(ring->id, ring->master_reg_table,
                             ring->master_reg_count);
}
struct cmd_node {
        const struct drm_i915_cmd_descriptor *desc;
        struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 *
 * If we attempt to generate a perfect hash, we should be able to look at bits
 * 31:29 of a command from a batch buffer and use the full mask for that
 * client. The existing INSTR_CLIENT_MASK/SHIFT defines can be used for this.
 */
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
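
/*
 * Concretely: hashing with CMD_HASH_MASK keeps only bits 31:23, so every
 * MI_* opcode gets its own bucket, while two 3D commands differing only
 * in opcode bits below bit 23 may share a bucket - the collision the
 * comment above accepts in exchange for never hashing on non-opcode bits.
 */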
static int init_hash_table(struct intel_engine_cs *ring,
                           const struct drm_i915_cmd_table *cmd_tables,
                           int cmd_table_count)
{
        int i, j;

        hash_init(ring->cmd_hash);

        for (i = 0; i < cmd_table_count; i++) {
                const struct drm_i915_cmd_table *table = &cmd_tables[i];

                for (j = 0; j < table->count; j++) {
                        const struct drm_i915_cmd_descriptor *desc =
                                &table->table[j];
                        struct cmd_node *desc_node =
                                kmalloc(sizeof(*desc_node), GFP_KERNEL);

                        if (!desc_node)
                                return -ENOMEM;

                        desc_node->desc = desc;
                        hash_add(ring->cmd_hash, &desc_node->node,
                                 desc->cmd.value & CMD_HASH_MASK);
                }
        }

        return 0;
}

static void fini_hash_table(struct intel_engine_cs *ring)
{
        struct hlist_node *tmp;
        struct cmd_node *desc_node;
        int i;

        hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
                hash_del(&desc_node->node);
                kfree(desc_node);
        }
}
/**
 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
 * @ring: the ringbuffer to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 *
 * Return: non-zero if initialization fails
 */
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
{
        const struct drm_i915_cmd_table *cmd_tables;
        int cmd_table_count;
        int ret;

        if (!IS_GEN7(ring->dev))
                return 0;

        switch (ring->id) {
        case RCS:
                if (IS_HASWELL(ring->dev)) {
                        cmd_tables = hsw_render_ring_cmds;
                        cmd_table_count =
                                ARRAY_SIZE(hsw_render_ring_cmds);
                } else {
                        cmd_tables = gen7_render_cmds;
                        cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
                }

                ring->reg_table = gen7_render_regs;
                ring->reg_count = ARRAY_SIZE(gen7_render_regs);

                if (IS_HASWELL(ring->dev)) {
                        ring->master_reg_table = hsw_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
                        ring->master_reg_table = ivb_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }

                ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
                break;
        case VCS:
                cmd_tables = gen7_video_cmds;
                cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
                ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        case BCS:
                if (IS_HASWELL(ring->dev)) {
                        cmd_tables = hsw_blt_ring_cmds;
                        cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
                } else {
                        cmd_tables = gen7_blt_cmds;
                        cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
                }

                ring->reg_table = gen7_blt_regs;
                ring->reg_count = ARRAY_SIZE(gen7_blt_regs);

                if (IS_HASWELL(ring->dev)) {
                        ring->master_reg_table = hsw_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
                } else {
                        ring->master_reg_table = ivb_master_regs;
                        ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
                }

                ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
                break;
        case VECS:
                cmd_tables = hsw_vebox_cmds;
                cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
                /* VECS can use the same length_mask function as VCS */
                ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
                break;
        default:
                DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
                          ring->id);
                BUG();
        }

        BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
        BUG_ON(!validate_regs_sorted(ring));

        WARN_ON(!hash_empty(ring->cmd_hash));

        ret = init_hash_table(ring, cmd_tables, cmd_table_count);
        if (ret) {
                DRM_ERROR("CMD: cmd_parser_init failed!\n");
                fini_hash_table(ring);
                return ret;
        }

        ring->needs_cmd_parser = true;

        return 0;
}
/**
 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
 * @ring: the ringbuffer to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified ring.
 */
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
{
        if (!ring->needs_cmd_parser)
                return;

        fini_hash_table(ring);
}
static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *ring,
                  u32 cmd_header)
{
        struct cmd_node *desc_node;

        hash_for_each_possible(ring->cmd_hash, desc_node, node,
                               cmd_header & CMD_HASH_MASK) {
                const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
                u32 masked_cmd = desc->cmd.mask & cmd_header;
                u32 masked_value = desc->cmd.value & desc->cmd.mask;

                if (masked_cmd == masked_value)
                        return desc;
        }

        return NULL;
}
/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the ring's
 * command parser tables, this function fills in default_desc based on the
 * ring's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *ring,
         u32 cmd_header,
         struct drm_i915_cmd_descriptor *default_desc)
{
        const struct drm_i915_cmd_descriptor *desc;
        u32 mask;

        desc = find_cmd_in_table(ring, cmd_header);
        if (desc)
                return desc;

        mask = ring->get_cmd_length_mask(cmd_header);
        if (!mask)
                return NULL;

        BUG_ON(!default_desc);
        default_desc->flags = CMD_DESC_SKIP;
        default_desc->length.mask = mask;

        return default_desc;
}
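
/*
 * For example, a render command that is absent from the tables (and thus
 * from the hash) comes back as the default descriptor: CMD_DESC_SKIP with
 * the 0xFF length mask from gen7_render_get_cmd_length_mask(), so
 * i915_parse_cmds() just steps over it without further checks.
 */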
static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
         int count, u32 addr)
{
        if (table) {
                int i;

                for (i = 0; i < count; i++) {
                        if (table[i].addr == addr)
                                return &table[i];
                }
        }

        return NULL;
}
static u32 *vmap_batch(struct drm_i915_gem_object *obj,
                       unsigned start, unsigned len)
{
        int i;
        void *addr = NULL;
        struct sg_page_iter sg_iter;
        int first_page = start >> PAGE_SHIFT;
        int last_page = (len + start + 4095) >> PAGE_SHIFT;
        int npages = last_page - first_page;
        struct page **pages;

        pages = drm_malloc_ab(npages, sizeof(*pages));
        if (pages == NULL) {
                DRM_DEBUG_DRIVER("Failed to get space for pages\n");
                goto finish;
        }

        i = 0;
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
                pages[i++] = sg_page_iter_page(&sg_iter);
                if (i == npages)
                        break;
        }

        addr = vmap(pages, i, 0, PAGE_KERNEL);
        if (addr == NULL) {
                DRM_DEBUG_DRIVER("Failed to vmap pages\n");
                goto finish;
        }

finish:
        if (pages)
                drm_free_large(pages);
        return (u32*)addr;
}
/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
                       struct drm_i915_gem_object *src_obj,
                       u32 batch_start_offset,
                       u32 batch_len)
{
        int needs_clflush = 0;
        void *src_base, *src;
        void *dst = NULL;
        int ret;

        if (batch_len > dest_obj->base.size ||
            batch_len + batch_start_offset > src_obj->base.size)
                return ERR_PTR(-E2BIG);

        if (WARN_ON(dest_obj->pages_pin_count == 0))
                return ERR_PTR(-ENODEV);

        ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
                return ERR_PTR(ret);
        }

        src_base = vmap_batch(src_obj, batch_start_offset, batch_len);
        if (!src_base) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
                ret = -ENOMEM;
                goto unpin_src;
        }

        ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
        if (ret) {
                DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
                goto unmap_src;
        }

        dst = vmap_batch(dest_obj, 0, batch_len);
        if (!dst) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
                ret = -ENOMEM;
                goto unmap_src;
        }

        src = src_base + offset_in_page(batch_start_offset);
        if (needs_clflush)
                drm_clflush_virt_range(src, batch_len);

        memcpy(dst, src, batch_len);

unmap_src:
        vunmap(src_base);
unpin_src:
        i915_gem_object_unpin_pages(src_obj);

        return ret ? ERR_PTR(ret) : dst;
}
/**
 * i915_needs_cmd_parser() - should a given ring use software command parsing?
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
        if (!ring->needs_cmd_parser)
                return false;

        if (!USES_PPGTT(ring->dev))
                return false;

        return (i915.enable_cmd_parser == 1);
}
static bool check_cmd(const struct intel_engine_cs *ring,
                      const struct drm_i915_cmd_descriptor *desc,
                      const u32 *cmd, u32 length,
                      const bool is_master,
                      bool *oacontrol_set)
{
        if (desc->flags & CMD_DESC_REJECT) {
                DRM_DEBUG_DRIVER("CMD: Rejected command: 0x%08X\n", *cmd);
                return false;
        }

        if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
                DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
                                 *cmd);
                return false;
        }

        if (desc->flags & CMD_DESC_REGISTER) {
                /*
                 * Get the distance between individual register offset
                 * fields if the command can perform more than one
                 * access at a time.
                 */
                const u32 step = desc->reg.step ? desc->reg.step : length;
                u32 offset;

                for (offset = desc->reg.offset; offset < length;
                     offset += step) {
                        const u32 reg_addr = cmd[offset] & desc->reg.mask;
                        const struct drm_i915_reg_descriptor *reg =
                                find_reg(ring->reg_table, ring->reg_count,
                                         reg_addr);

                        if (!reg && is_master)
                                reg = find_reg(ring->master_reg_table,
                                               ring->master_reg_count,
                                               reg_addr);

                        if (!reg) {
                                DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
                                                 reg_addr, *cmd, ring->id);
                                return false;
                        }

                        /*
                         * OACONTROL requires some special handling for
                         * writes. We want to make sure that any batch which
                         * enables OA also disables it before the end of the
                         * batch. The goal is to prevent one process from
                         * snooping on the perf data from another process. To do
                         * that, we need to check the value that will be written
                         * to the register. Hence, limit OACONTROL writes to
                         * only MI_LOAD_REGISTER_IMM commands.
                         */
                        if (reg_addr == OACONTROL) {
                                if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
                                        return false;
                                }

                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
                                        *oacontrol_set = (cmd[offset + 1] != 0);
                        }

                        /*
                         * Check the value written to the register against the
                         * allowed mask/value pair given in the whitelist entry.
                         */
                        if (reg->mask) {
                                if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
                                }

                                if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
                                    (offset + 2 > length ||
                                     (cmd[offset + 1] & reg->mask) != reg->value)) {
                                        DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
                                                         reg_addr);
                                        return false;
                                }
                        }
                }
        }

        if (desc->flags & CMD_DESC_BITMASK) {
                int i;

                for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
                        u32 dword;

                        if (desc->bits[i].mask == 0)
                                break;

                        if (desc->bits[i].condition_mask != 0) {
                                u32 offset =
                                        desc->bits[i].condition_offset;
                                u32 condition = cmd[offset] &
                                        desc->bits[i].condition_mask;

                                if (condition == 0)
                                        continue;
                        }

                        dword = cmd[desc->bits[i].offset] &
                                desc->bits[i].mask;

                        if (dword != desc->bits[i].expected) {
                                DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (ring=%d)\n",
                                                 *cmd,
                                                 desc->bits[i].mask,
                                                 desc->bits[i].expected,
                                                 dword, ring->id);
                                return false;
                        }
                }
        }

        return true;
}

#define LENGTH_BIAS 2
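
/*
 * For illustration, assuming the usual GFX_OP_PIPE_CONTROL(len) encoding
 * from i915_reg.h (which stores len - 2 in the header): a PIPE_CONTROL
 * built with GFX_OP_PIPE_CONTROL(5) has a length field of 3, and the
 * parser below recovers 3 + LENGTH_BIAS = 5 total dwords.
 */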
/**
 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
 * @ring: the ring on which the batch is to execute
 * @batch_obj: the batch buffer in question
 * @shadow_batch_obj: copy of the batch buffer in question
 * @batch_start_offset: byte offset in the batch at which execution starts
 * @batch_len: length of the commands in batch_obj
 * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
                    struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
                    u32 batch_len,
                    bool is_master)
{
        int ret = 0;
        u32 *cmd, *batch_base, *batch_end;
        struct drm_i915_cmd_descriptor default_desc = { 0 };
        bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */

        batch_base = copy_batch(shadow_batch_obj, batch_obj,
                                batch_start_offset, batch_len);
        if (IS_ERR(batch_base)) {
                DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
                return PTR_ERR(batch_base);
        }

        /*
         * We use the batch length as size because the shadow object is as
         * large or larger and copy_batch() will write MI_NOPs to the extra
         * space. Parsing should be faster in some cases this way.
         */
        batch_end = batch_base + (batch_len / sizeof(*batch_end));

        cmd = batch_base;
        while (cmd < batch_end) {
                const struct drm_i915_cmd_descriptor *desc;
                u32 length;

                if (*cmd == MI_BATCH_BUFFER_END)
                        break;

                desc = find_cmd(ring, *cmd, &default_desc);
                if (!desc) {
                        DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
                                         *cmd);
                        ret = -EINVAL;
                        break;
                }

                /*
                 * If the batch buffer contains a chained batch, return an
                 * error that tells the caller to abort and dispatch the
                 * workload as a non-secure batch.
                 */
                if (desc->cmd.value == MI_BATCH_BUFFER_START) {
                        ret = -EACCES;
                        break;
                }

                if (desc->flags & CMD_DESC_FIXED)
                        length = desc->length.fixed;
                else
                        length = ((*cmd & desc->length.mask) + LENGTH_BIAS);

                if ((batch_end - cmd) < length) {
                        DRM_DEBUG_DRIVER("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
                                         *cmd,
                                         length,
                                         batch_end - cmd);
                        ret = -EINVAL;
                        break;
                }

                if (!check_cmd(ring, desc, cmd, length, is_master,
                               &oacontrol_set)) {
                        ret = -EINVAL;
                        break;
                }

                cmd += length;
        }

        if (oacontrol_set) {
                DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
                ret = -EINVAL;
        }

        if (cmd >= batch_end) {
                DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
                ret = -EINVAL;
        }

        vunmap(batch_base);

        return ret;
}
/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(void)
{
        /*
         * Command parser version history
         *
         * 1. Initial version. Checks batches and reports violations, but leaves
         *    hardware parsing enabled (so does not allow new use cases).
         * 2. Allow access to the MI_PREDICATE_SRC0 and
         *    MI_PREDICATE_SRC1 registers.
         * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
         * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
         * 5. GPGPU dispatch compute indirect registers.
         */
        return 5;
}