/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 *
 */
#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_memcpy.h"
/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser rejects such commands.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access.
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each engine maintains tables of commands and registers which the parser
 * uses in scanning batch buffers submitted to that engine.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-engine length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-engine command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, register whitelist. The parser implements a number
 * of checks, including the privileged memory checks, via a general bitmasking
 * mechanism.
 */
/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                the command tables
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 *
	 * A non-zero step value implies that the command may access multiple
	 * registers in sequence (e.g. LRI), in that case step gives the
	 * distance in dwords between individual offset fields.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 step;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};
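/*
 * An illustrative reading of the descriptor (not an actual table entry): a
 * descriptor with flags = CMD_DESC_BITMASK and
 * bits = {{ .offset = 0, .mask = MI_GLOBAL_GTT, .expected = 0 }} tells the
 * parser to reject any matching command whose dword 0 has the MI_GLOBAL_GTT
 * bit set, i.e. any variant that targets privileged GGTT memory.
 */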
/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each engine has an array of tables. Each table consists of an array of
 * command descriptors, which must be sorted with command opcodes in
 * ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};
#define STD_MI_OPCODE_SHIFT  (32 - 9)
#define STD_3D_OPCODE_SHIFT  (32 - 16)
#define STD_2D_OPCODE_SHIFT  (32 - 10)
#define STD_MFX_OPCODE_SHIFT (32 - 16)
#define MIN_OPCODE_SHIFT 16

#define CMD(op, opm, f, lm, fl, ...)				\
	{							\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
		.cmd = { (op & ~0u << (opm)), ~0u << (opm) },	\
		.length = { (lm) },				\
		__VA_ARGS__					\
	}

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_SHIFT
#define S3D STD_3D_OPCODE_SHIFT
#define S2D STD_2D_OPCODE_SHIFT
#define SMFX STD_MFX_OPCODE_SHIFT
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
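/*
 * For illustration, CMD(MI_NOOP, SMI, F, 1, S) expands (roughly) to:
 *
 *	{
 *		.flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *		.cmd = { MI_NOOP & (~0u << SMI), ~0u << SMI },
 *		.length = { 1 },
 *	}
 *
 * i.e. the top 9 bits (the MI opcode field) identify the command and the
 * fixed length is a single dword.
 */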
/*            Command                          Mask   Fixed Len   Action
	      ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      R  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
	CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MI_BATCH_BUFFER_START requires some special handling. It's not
	 * really a 'skip' action but it doesn't seem like it's worth adding
	 * a new action. See intel_engine_cmd_parser().
	 */
	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};
static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 1,
			.mask = MI_REPORT_PERF_COUNT_GGTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
	CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
	      .bits = {{
			.offset = 2,
			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
			.expected = 0,
	      }},						       ),
	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 1,
			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_STORE_DATA_INDEX),
			.expected = 0,
			.condition_offset = 1,
			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
	      }},						       ),
};
static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};
static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MFX_WAIT doesn't fit the way we handle length for most commands.
	 * It has a length field but it uses a non-standard length bias.
	 * It is always 1 dword though, so just treat it as fixed length.
	 */
	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};
static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
};
static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};
static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};
/*
 * For Gen9 we can still rely on the h/w to enforce cmd security, and only
 * need to re-enforce the register access checks. We therefore only need to
 * teach the cmdparser how to find the end of each command, and identify
 * register accesses. The table doesn't need to reject any commands, and so
 * the only commands listed here are:
 *   1) Those that touch registers
 *   2) Those that do not have the default 8-bit length
 *
 * Note that the default MI length mask chosen for this table is 0xFF, not
 * the 0x3F used on older devices. This is because the vast majority of MI
 * cmds on Gen9 use a standard 8-bit Length field.
 * All the Gen9 blitter instructions are standard 0xFF length mask, and
 * none allow access to non-general registers, so in fact no BLT cmds are
 * included in the table at all.
 */
static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      S  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      S  ),
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   S  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  S  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3FF,  S  ),
	CMD(  MI_STORE_REGISTER_MEM_GEN8,       SMI,    F,  4,      W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   S  ),
	CMD(  MI_LOAD_REGISTER_MEM_GEN8,        SMI,    F,  4,      W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),

	/*
	 * We allow BB_START but apply further checks. We just sanitize the
	 * basic fields here.
	 */
#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
	CMD(  MI_BATCH_BUFFER_START_GEN8,       SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_BB_START_OPERAND_MASK,
			.expected = MI_BB_START_OPERAND_EXPECT,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor noop_desc =
	CMD(MI_NOOP, SMI, F, 1, S);
#undef CMD

#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B

static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
	{ gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
};
/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr.  If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
	i915_reg_t addr;
	u32 mask;
	u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(_reg, ...) \
	{ .addr = (_reg), __VA_ARGS__ }

#define REG32_IDX(_reg, idx) \
	{ .addr = _reg(idx) }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(_reg) \
	{ .addr = _reg }, \
	{ .addr = _reg ## _UDW }

#define REG64_IDX(_reg, idx) \
	{ .addr = _reg(idx) }, \
	{ .addr = _reg ## _UDW(idx) }
static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
	REG64(GPGPU_THREADS_DISPATCHED),
	REG64(HS_INVOCATION_COUNT),
	REG64(DS_INVOCATION_COUNT),
	REG64(IA_VERTICES_COUNT),
	REG64(IA_PRIMITIVES_COUNT),
	REG64(VS_INVOCATION_COUNT),
	REG64(GS_INVOCATION_COUNT),
	REG64(GS_PRIMITIVES_COUNT),
	REG64(CL_INVOCATION_COUNT),
	REG64(CL_PRIMITIVES_COUNT),
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
	REG64(MI_PREDICATE_SRC0),
	REG64(MI_PREDICATE_SRC1),
	REG32(GEN7_3DPRIM_END_OFFSET),
	REG32(GEN7_3DPRIM_START_VERTEX),
	REG32(GEN7_3DPRIM_VERTEX_COUNT),
	REG32(GEN7_3DPRIM_INSTANCE_COUNT),
	REG32(GEN7_3DPRIM_START_INSTANCE),
	REG32(GEN7_3DPRIM_BASE_VERTEX),
	REG32(GEN7_GPGPU_DISPATCHDIMX),
	REG32(GEN7_GPGPU_DISPATCHDIMY),
	REG32(GEN7_GPGPU_DISPATCHDIMZ),
	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
	REG32(GEN7_SO_WRITE_OFFSET(0)),
	REG32(GEN7_SO_WRITE_OFFSET(1)),
	REG32(GEN7_SO_WRITE_OFFSET(2)),
	REG32(GEN7_SO_WRITE_OFFSET(3)),
	REG32(GEN7_L3SQCREG1),
	REG32(GEN7_L3CNTLREG2),
	REG32(GEN7_L3CNTLREG3),
	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};
static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
	REG64_IDX(HSW_CS_GPR, 0),
	REG64_IDX(HSW_CS_GPR, 1),
	REG64_IDX(HSW_CS_GPR, 2),
	REG64_IDX(HSW_CS_GPR, 3),
	REG64_IDX(HSW_CS_GPR, 4),
	REG64_IDX(HSW_CS_GPR, 5),
	REG64_IDX(HSW_CS_GPR, 6),
	REG64_IDX(HSW_CS_GPR, 7),
	REG64_IDX(HSW_CS_GPR, 8),
	REG64_IDX(HSW_CS_GPR, 9),
	REG64_IDX(HSW_CS_GPR, 10),
	REG64_IDX(HSW_CS_GPR, 11),
	REG64_IDX(HSW_CS_GPR, 12),
	REG64_IDX(HSW_CS_GPR, 13),
	REG64_IDX(HSW_CS_GPR, 14),
	REG64_IDX(HSW_CS_GPR, 15),
	REG32(HSW_SCRATCH1,
	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
	      .value = 0),
	REG32(HSW_ROW_CHICKEN3,
	      .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
			HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	      .value = 0),
};
static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
	REG32(BCS_SWCTRL),
	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};

static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
	REG32(BCS_SWCTRL),
	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
	REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE),
	REG64_IDX(BCS_GPR, 0),
	REG64_IDX(BCS_GPR, 1),
	REG64_IDX(BCS_GPR, 2),
	REG64_IDX(BCS_GPR, 3),
	REG64_IDX(BCS_GPR, 4),
	REG64_IDX(BCS_GPR, 5),
	REG64_IDX(BCS_GPR, 6),
	REG64_IDX(BCS_GPR, 7),
	REG64_IDX(BCS_GPR, 8),
	REG64_IDX(BCS_GPR, 9),
	REG64_IDX(BCS_GPR, 10),
	REG64_IDX(BCS_GPR, 11),
	REG64_IDX(BCS_GPR, 12),
	REG64_IDX(BCS_GPR, 13),
	REG64_IDX(BCS_GPR, 14),
	REG64_IDX(BCS_GPR, 15),
};

#undef REG64
#undef REG32
struct drm_i915_reg_table {
	const struct drm_i915_reg_descriptor *regs;
	int num_regs;
};

static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};

static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};

static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};

static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};

static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
	{ gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT) {
			if (op == 6)
				return 0xFFFF;
			else
				return 0xFFF;
		} else
			return 0xFF;
	}

	DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}
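/*
 * Worked example (hypothetical header): for a variable-length MI command on
 * the gen7 engines above, the parsing loop in intel_engine_cmd_parser()
 * computes the total command size as
 *
 *	length = (cmd_header & 0x3F) + LENGTH_BIAS;
 *
 * since the MI DWord Length field is biased: the command occupies the field
 * value plus two dwords, hence LENGTH_BIAS == 2 below.
 */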
static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
	int i;
	bool ret = true;

	if (!cmd_tables || cmd_table_count == 0)
		return true;

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];
		u32 previous = 0;
		int j;

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			u32 curr = desc->cmd.value & desc->cmd.mask;

			if (curr < previous) {
				drm_err(&engine->i915->drm,
					"CMD: %s [%d] command table not sorted: "
					"table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					engine->name, engine->id,
					i, j, curr, previous);
				ret = false;
			}

			previous = curr;
		}
	}

	return ret;
}

static bool check_sorted(const struct intel_engine_cs *engine,
			 const struct drm_i915_reg_descriptor *reg_table,
			 int reg_count)
{
	int i;
	u32 previous = 0;
	bool ret = true;

	for (i = 0; i < reg_count; i++) {
		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);

		if (curr < previous) {
			drm_err(&engine->i915->drm,
				"CMD: %s [%d] register table not sorted: "
				"entry=%d reg=0x%08X prev=0x%08X\n",
				engine->name, engine->id,
				i, curr, previous);
			ret = false;
		}

		previous = curr;
	}

	return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
	int i;
	const struct drm_i915_reg_table *table;

	for (i = 0; i < engine->reg_table_count; i++) {
		table = &engine->reg_tables[i];
		if (!check_sorted(engine, table->regs, table->num_regs))
			return false;
	}

	return true;
}

struct cmd_node {
	const struct drm_i915_cmd_descriptor *desc;
	struct hlist_node node;
};
/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 */
static inline u32 cmd_header_key(u32 x)
{
	switch (x >> INSTR_CLIENT_SHIFT) {
	default:
	case INSTR_MI_CLIENT:
		return x >> STD_MI_OPCODE_SHIFT;
	case INSTR_RC_CLIENT:
		return x >> STD_3D_OPCODE_SHIFT;
	case INSTR_BC_CLIENT:
		return x >> STD_2D_OPCODE_SHIFT;
	}
}
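/*
 * Example, assuming the standard MI encoding: MI_BATCH_BUFFER_START carries
 * opcode 0x31 in bits 31:23, so cmd_header_key() returns 0x31 for every
 * variant of that command and they all land in the same hash bucket,
 * regardless of the GGTT/PPGTT flag bits below bit 23.
 */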
static int init_hash_table(struct intel_engine_cs *engine,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
	int i, j;

	hash_init(engine->cmd_hash);

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			struct cmd_node *desc_node =
				kmalloc(sizeof(*desc_node), GFP_KERNEL);

			if (!desc_node)
				return -ENOMEM;

			desc_node->desc = desc;
			hash_add(engine->cmd_hash, &desc_node->node,
				 cmd_header_key(desc->cmd.value));
		}
	}

	return 0;
}

static void fini_hash_table(struct intel_engine_cs *engine)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
	int i;

	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
		hash_del(&desc_node->node);
		kfree(desc_node);
	}
}
/**
 * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
 * @engine: the engine to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 */
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
	int ret;

	if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
					  engine->class == COPY_ENGINE_CLASS))
		return;

	switch (engine->class) {
	case RENDER_CLASS:
		if (IS_HASWELL(engine->i915)) {
			cmd_tables = hsw_render_ring_cmd_table;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmd_table);
		} else {
			cmd_tables = gen7_render_cmd_table;
			cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
		}

		if (IS_HASWELL(engine->i915)) {
			engine->reg_tables = hsw_render_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
		} else {
			engine->reg_tables = ivb_render_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
		}
		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VIDEO_DECODE_CLASS:
		cmd_tables = gen7_video_cmd_table;
		cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case COPY_ENGINE_CLASS:
		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		if (IS_GEN(engine->i915, 9)) {
			cmd_tables = gen9_blt_cmd_table;
			cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
			engine->get_cmd_length_mask =
				gen9_blt_get_cmd_length_mask;

			/* BCS Engine unsafe without parser */
			engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
		} else if (IS_HASWELL(engine->i915)) {
			cmd_tables = hsw_blt_ring_cmd_table;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
		} else {
			cmd_tables = gen7_blt_cmd_table;
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
		}

		if (IS_GEN(engine->i915, 9)) {
			engine->reg_tables = gen9_blt_reg_tables;
			engine->reg_table_count =
				ARRAY_SIZE(gen9_blt_reg_tables);
		} else if (IS_HASWELL(engine->i915)) {
			engine->reg_tables = hsw_blt_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
		} else {
			engine->reg_tables = ivb_blt_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
		}
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		cmd_tables = hsw_vebox_cmd_table;
		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
		/* VECS can use the same length_mask function as VCS */
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		MISSING_CASE(engine->class);
		return;
	}

	if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
		drm_err(&engine->i915->drm,
			"%s: command descriptions are not sorted\n",
			engine->name);
		return;
	}
	if (!validate_regs_sorted(engine)) {
		drm_err(&engine->i915->drm,
			"%s: registers are not sorted\n", engine->name);
		return;
	}

	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
	if (ret) {
		drm_err(&engine->i915->drm,
			"%s: initialisation failed!\n", engine->name);
		fini_hash_table(engine);
		return;
	}

	engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}

/**
 * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
 * @engine: the engine to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified engine.
 */
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
	if (!intel_engine_using_cmd_parser(engine))
		return;

	fini_hash_table(engine);
}
static const struct drm_i915_cmd_descriptor *
find_cmd_in_table(struct intel_engine_cs *engine,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;

	hash_for_each_possible(engine->cmd_hash, desc_node, node,
			       cmd_header_key(cmd_header)) {
		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
		if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
			return desc;
	}

	return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the engine's
 * command parser tables, this function fills in default_desc based on the
 * engine's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor *
find_cmd(struct intel_engine_cs *engine,
	 u32 cmd_header,
	 const struct drm_i915_cmd_descriptor *desc,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	u32 mask;

	if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
		return desc;

	desc = find_cmd_in_table(engine, cmd_header);
	if (desc)
		return desc;

	mask = engine->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

	default_desc->cmd.value = cmd_header;
	default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT;
	default_desc->length.mask = mask;
	default_desc->flags = CMD_DESC_SKIP;
	return default_desc;
}
static const struct drm_i915_reg_descriptor *
__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
{
	int start = 0, end = count;

	while (start < end) {
		int mid = start + (end - start) / 2;
		int ret = addr - i915_mmio_reg_offset(table[mid].addr);

		if (ret < 0)
			end = mid;
		else if (ret > 0)
			start = mid + 1;
		else
			return &table[mid];
	}

	return NULL;
}

static const struct drm_i915_reg_descriptor *
find_reg(const struct intel_engine_cs *engine, u32 addr)
{
	const struct drm_i915_reg_table *table = engine->reg_tables;
	const struct drm_i915_reg_descriptor *reg = NULL;
	int count = engine->reg_table_count;

	for (; !reg && (count > 0); ++table, --count)
		reg = __find_reg(table->regs, table->num_regs, addr);

	return reg;
}
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
		       struct drm_i915_gem_object *src_obj,
		       unsigned long offset, unsigned long length)
{
	bool needs_clflush;
	void *dst, *src;
	int ret;

	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
	if (IS_ERR(dst))
		return dst;

	ret = i915_gem_object_pin_pages(src_obj);
	if (ret) {
		i915_gem_object_unpin_map(dst_obj);
		return ERR_PTR(ret);
	}

	needs_clflush =
		!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);

	src = ERR_PTR(-ENODEV);
	if (needs_clflush && i915_has_memcpy_from_wc()) {
		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
		if (!IS_ERR(src)) {
			i915_unaligned_memcpy_from_wc(dst,
						      src + offset,
						      length);
			i915_gem_object_unpin_map(src_obj);
		}
	}
	if (IS_ERR(src)) {
		unsigned long x, n, remain;
		void *ptr;

		/*
		 * We can avoid clflushing partial cachelines before the write
		 * if we only ever write full cache-lines. Since we know that
		 * both the source and destination are in multiples of
		 * PAGE_SIZE, we can simply round up to the next cacheline.
		 * We don't care about copying too much here as we only
		 * validate up to the end of the batch.
		 */
		remain = length;
		if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			remain = round_up(remain,
					  boot_cpu_data.x86_clflush_size);

		ptr = dst;
		x = offset_in_page(offset);
		for (n = offset >> PAGE_SHIFT; remain; n++) {
			int len = min(remain, PAGE_SIZE - x);

			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
			if (needs_clflush)
				drm_clflush_virt_range(src + x, len);
			memcpy(ptr, src + x, len);
			kunmap_atomic(src);

			ptr += len;
			remain -= len;
			x = 0;
		}
	}

	i915_gem_object_unpin_pages(src_obj);

	memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));

	/* dst_obj is returned with vmap pinned */
	return dst;
}

static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
			       const u32 cmd)
{
	return desc->cmd.value == (cmd & desc->cmd.mask);
}
static bool check_cmd(const struct intel_engine_cs *engine,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd, u32 length)
{
	if (desc->flags & CMD_DESC_SKIP)
		return true;

	if (desc->flags & CMD_DESC_REJECT) {
		DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
		return false;
	}

	if (desc->flags & CMD_DESC_REGISTER) {
		/*
		 * Get the distance between individual register offset
		 * fields if the command can perform more than one
		 * access at a time.
		 */
		const u32 step = desc->reg.step ? desc->reg.step : length;
		u32 offset;

		for (offset = desc->reg.offset; offset < length;
		     offset += step) {
			const u32 reg_addr = cmd[offset] & desc->reg.mask;
			const struct drm_i915_reg_descriptor *reg =
				find_reg(engine, reg_addr);

			if (!reg) {
				DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
					  reg_addr, *cmd, engine->name);
				return false;
			}

			/*
			 * Check the value written to the register against the
			 * allowed mask/value pair given in the whitelist entry.
			 */
			if (reg->mask) {
				if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
					DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
						  reg_addr);
					return false;
				}

				if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
					DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
						  reg_addr);
					return false;
				}

				if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
				    (offset + 2 > length ||
				     (cmd[offset + 1] & reg->mask) != reg->value)) {
					DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
						  reg_addr);
					return false;
				}
			}
		}
	}

	if (desc->flags & CMD_DESC_BITMASK) {
		int i;

		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
			u32 dword;

			if (desc->bits[i].mask == 0)
				break;

			if (desc->bits[i].condition_mask != 0) {
				u32 offset =
					desc->bits[i].condition_offset;
				u32 condition = cmd[offset] &
					desc->bits[i].condition_mask;

				if (condition == 0)
					continue;
			}

			if (desc->bits[i].offset >= length) {
				DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
					  *cmd, engine->name);
				return false;
			}

			dword = cmd[desc->bits[i].offset] &
				desc->bits[i].mask;

			if (dword != desc->bits[i].expected) {
				DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
					  *cmd,
					  desc->bits[i].mask,
					  desc->bits[i].expected,
					  dword, engine->name);
				return false;
			}
		}
	}

	return true;
}
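/*
 * For example, using entries from the tables above: hsw_render_regs
 * whitelists HSW_SCRATCH1 with a non-zero mask and a value of 0, so
 * check_cmd() rejects LRM and LRR to that register outright and allows an
 * MI_LOAD_REGISTER_IMM write only when the immediate satisfies
 * (imm & ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE) == 0.
 */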
static int check_bbstart(u32 *cmd, u32 offset, u32 length,
			 u32 batch_length,
			 u64 batch_addr,
			 u64 shadow_addr,
			 const unsigned long *jump_whitelist)
{
	u64 jump_offset, jump_target;
	u32 target_cmd_offset, target_cmd_index;

	/* For igt compatibility on older platforms */
	if (!jump_whitelist) {
		DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
		return -EACCES;
	}

	if (length != 3) {
		DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
			  length);
		return -EINVAL;
	}

	jump_target = *(u64 *)(cmd + 1);
	jump_offset = jump_target - batch_addr;

	/*
	 * Any underflow of jump_target is guaranteed to be outside the range
	 * of a u32, so >= test catches both too large and too small
	 */
	if (jump_offset >= batch_length) {
		DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
			  jump_target);
		return -EINVAL;
	}

	/*
	 * This cannot overflow a u32 because we already checked jump_offset
	 * is within the BB, and the batch_length is a u32
	 */
	target_cmd_offset = lower_32_bits(jump_offset);
	target_cmd_index = target_cmd_offset / sizeof(u32);

	*(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;

	if (target_cmd_index == offset)
		return 0;

	if (IS_ERR(jump_whitelist))
		return PTR_ERR(jump_whitelist);

	if (!test_bit(target_cmd_index, jump_whitelist)) {
		DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
			  jump_target);
		return -EINVAL;
	}

	return 0;
}

static unsigned long *alloc_whitelist(u32 batch_length)
{
	unsigned long *jmp;

	/*
	 * We expect batch_length to be less than 256KiB for known users,
	 * i.e. we need at most an 8KiB bitmap allocation which should be
	 * reasonably cheap due to kmalloc caches.
	 */

	/* Prefer to report transient allocation failure rather than hit oom */
	jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
			    GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!jmp)
		return ERR_PTR(-ENOMEM);

	return jmp;
}
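/*
 * Sizing sanity check for the expectation above: one whitelist bit is kept
 * per batch dword, so a 256KiB batch is 64Ki dwords and needs a
 * 64Kibit == 8KiB bitmap, matching the "at most an 8KiB bitmap" comment.
 */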
#define LENGTH_BIAS 2

/**
 * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
 * @engine: the engine on which the batch is to execute
 * @batch: the batch buffer in question
 * @batch_offset: byte offset in the batch at which execution starts
 * @batch_length: length of the commands in batch_obj
 * @shadow: validated copy of the batch buffer in question
 * @trampoline: whether to emit a conditional trampoline at the end of the batch
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct i915_vma *batch,
			    unsigned long batch_offset,
			    unsigned long batch_length,
			    struct i915_vma *shadow,
			    bool trampoline)
{
	u32 *cmd, *batch_end, offset = 0;
	struct drm_i915_cmd_descriptor default_desc = noop_desc;
	const struct drm_i915_cmd_descriptor *desc = &default_desc;
	unsigned long *jump_whitelist;
	u64 batch_addr, shadow_addr;
	int ret = 0;

	GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
	GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
	GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
				     batch->size));
	GEM_BUG_ON(!batch_length);

	cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
	if (IS_ERR(cmd)) {
		DRM_DEBUG("CMD: Failed to copy batch\n");
		return PTR_ERR(cmd);
	}

	jump_whitelist = NULL;
	if (!trampoline)
		/* Defer failure until attempted use */
		jump_whitelist = alloc_whitelist(batch_length);

	shadow_addr = gen8_canonical_addr(shadow->node.start);
	batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);

	/*
	 * We use the batch length as size because the shadow object is as
	 * large or larger and copy_batch() will write MI_NOPs to the extra
	 * space. Parsing should be faster in some cases this way.
	 */
	batch_end = cmd + batch_length / sizeof(*batch_end);
	while (*cmd != MI_BATCH_BUFFER_END) {
		u32 length = 1;

		if (*cmd != MI_NOOP) { /* MI_NOOP == 0 */
			desc = find_cmd(engine, *cmd, desc, &default_desc);
			if (!desc) {
				DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
				ret = -EINVAL;
				break;
			}

			if (desc->flags & CMD_DESC_FIXED)
				length = desc->length.fixed;
			else
				length = (*cmd & desc->length.mask) + LENGTH_BIAS;

			if ((batch_end - cmd) < length) {
				DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
					  *cmd,
					  length,
					  batch_end - cmd);
				ret = -EINVAL;
				break;
			}

			if (!check_cmd(engine, desc, cmd, length)) {
				ret = -EACCES;
				break;
			}

			if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
				ret = check_bbstart(cmd, offset, length, batch_length,
						    batch_addr, shadow_addr,
						    jump_whitelist);
				break;
			}
		}

		if (!IS_ERR_OR_NULL(jump_whitelist))
			__set_bit(offset, jump_whitelist);

		cmd += length;
		offset += length;
		if (cmd >= batch_end) {
			DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
			ret = -EINVAL;
			break;
		}
	}

	if (trampoline) {
		/*
		 * With the trampoline, the shadow is executed twice.
		 *
		 *   1 - starting at offset 0, in privileged mode
		 *   2 - starting at offset batch_len, as non-privileged
		 *
		 * Only if the batch is valid and safe to execute, do we
		 * allow the first privileged execution to proceed. If not,
		 * we terminate the first batch and use the second batchbuffer
		 * entry to chain to the original unsafe non-privileged batch,
		 * leaving it to the HW to validate.
		 */
		*batch_end = MI_BATCH_BUFFER_END;

		if (ret) {
			/* Batch unsafe to execute with privileges, cancel! */
			cmd = page_mask_bits(shadow->obj->mm.mapping);
			*cmd = MI_BATCH_BUFFER_END;

			/* If batch is unsafe but valid, jump to the original */
			if (ret == -EACCES) {
				unsigned int flags;

				flags = MI_BATCH_NON_SECURE_I965;
				if (IS_HASWELL(engine->i915))
					flags = MI_BATCH_NON_SECURE_HSW;

				GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
				__gen6_emit_bb_start(batch_end,
						     batch_addr,
						     flags);

				ret = 0; /* allow execution */
			}
		}
	}

	i915_gem_object_flush_map(shadow->obj);

	if (!IS_ERR_OR_NULL(jump_whitelist))
		kfree(jump_whitelist);
	i915_gem_object_unpin_map(shadow->obj);
	return ret;
}
/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 * @dev_priv: i915 device private
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	bool active = false;

	/* If the command parser is not enabled, report 0 - unsupported */
	for_each_uabi_engine(engine, dev_priv) {
		if (intel_engine_using_cmd_parser(engine)) {
			active = true;
			break;
		}
	}
	if (!active)
		return 0;

	/*
	 * Command parser version history
	 *
	 * 1. Initial version. Checks batches and reports violations, but leaves
	 *    hardware parsing enabled (so does not allow new use cases).
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
	 * 5. GPGPU dispatch compute indirect registers.
	 * 6. TIMESTAMP register and Haswell CS GPR registers
	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
	 * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
	 *    rely on the HW to NOOP disallowed commands as it would without
	 *    the parser enabled.
	 * 9. Don't whitelist or handle oacontrol specially, as ownership
	 *    for oacontrol state is moving to i915-perf.
	 * 10. Support for Gen9 BCS Parsing
	 */
	return 10;
}