/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Brad Volkin <bradley.d.volkin@intel.com>
 */

#include "gt/intel_engine.h"

#include "i915_drv.h"
#include "i915_memcpy.h"

/**
 * DOC: batch buffer command parser
 *
 * Motivation:
 * Certain OpenGL features (e.g. transform feedback, performance monitoring)
 * require userspace code to submit batches containing commands such as
 * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some
 * generations of the hardware will noop these commands in "unsecure" batches
 * (which includes all userspace batches submitted via i915) even though the
 * commands may be safe and represent the intended programming model of the
 * device.
 *
 * The software command parser is similar in operation to the command parsing
 * done in hardware for unsecure batches. However, the software parser allows
 * some operations that would be noop'd by hardware, if the parser determines
 * the operation is safe, and submits the batch as "secure" to prevent hardware
 * parsing.
 *
 * Threats:
 * At a high level, the hardware (and software) checks attempt to prevent
 * granting userspace undue privileges. There are three categories of privilege.
 *
 * First, commands which are explicitly defined as privileged or which should
 * only be used by the kernel driver. The parser rejects such commands.
 *
 * Second, commands which access registers. To support correct/enhanced
 * userspace functionality, particularly certain OpenGL extensions, the parser
 * provides a whitelist of registers which userspace may safely access.
 *
 * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
 * The parser always rejects such commands.
 *
 * The majority of the problematic commands fall in the MI_* range, with only a
 * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW).
 *
 * Implementation:
 * Each engine maintains tables of commands and registers which the parser
 * uses in scanning batch buffers submitted to that engine.
 *
 * Since the set of commands that the parser must check for is significantly
 * smaller than the number of commands supported, the parser tables contain only
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implemented via a per-engine length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
 * To handle this, the parser provides a way to define explicit "skip" entries
 * in the per-engine command tables.
 *
 * Other command table entries map fairly directly to the high level categories
 * mentioned above: rejected, register whitelist. The parser implements a number
 * of checks, including the privileged memory checks, via a general bitmasking
 * mechanism.
 */
/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 *
	 * A non-zero step value implies that the command may access multiple
	 * registers in sequence (e.g. LRI), in that case step gives the
	 * distance in dwords between individual offset fields.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 step;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};
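
/*
 * Illustrative note: for a register-list entry such as the
 * MI_LOAD_REGISTER_IMM descriptor in the tables below, which uses
 * .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }, the parser reads
 * register offsets from dwords 1, 3, 5, ... of the command, matching LRI's
 * layout of alternating (register offset, value) pairs after the header.
 */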

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each engine has an array of tables. Each table consists of an array of
 * command descriptors, which must be sorted with command opcodes in
 * ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};

#define STD_MI_OPCODE_SHIFT  (32 - 9)
#define STD_3D_OPCODE_SHIFT  (32 - 16)
#define STD_2D_OPCODE_SHIFT  (32 - 10)
#define STD_MFX_OPCODE_SHIFT (32 - 16)
#define MIN_OPCODE_SHIFT 16

#define CMD(op, opm, f, lm, fl, ...)				\
	{							\
		.flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),	\
		.cmd = { (op & ~0u << (opm)), ~0u << (opm) },	\
		.length = { (lm) },				\
		__VA_ARGS__					\
	}
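
/*
 * For illustration, CMD(MI_NOOP, SMI, F, 1, S) expands to an initializer
 * roughly equivalent to:
 *
 *	{
 *		.flags = CMD_DESC_SKIP | CMD_DESC_FIXED,
 *		.cmd = { MI_NOOP & (~0u << 23), ~0u << 23 },
 *		.length = { 1 },
 *	}
 *
 * i.e. bits 31:23 identify the command and it is a single dword long.
 */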

/* Convenience macros to compress the tables */
#define SMI STD_MI_OPCODE_SHIFT
#define S3D STD_3D_OPCODE_SHIFT
#define S2D STD_2D_OPCODE_SHIFT
#define SMFX STD_MFX_OPCODE_SHIFT
#define F true
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK

/*            Command                          Mask   Fixed Len   Action
	      ---------------------------------------------------------- */
static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      R  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
	CMD(  MI_STORE_REGISTER_MEM,            SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_LOAD_REGISTER_MEM,             SMI,    F,  3,     W | B,
	      .reg = { .offset = 1, .mask = 0x007FFFFC },
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MI_BATCH_BUFFER_START requires some special handling. It's not
	 * really a 'skip' action but it doesn't seem like it's worth adding
	 * a new action. See intel_engine_cmd_parser().
	 */
	CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
};

static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
	CMD(  MI_TOPOLOGY_FILTER,               SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_SET_CONTEXT,                   SMI,   !F,  0xFF,   R  ),
	CMD(  MI_URB_CLEAR,                     SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0xFF,   R  ),
	CMD(  MI_CLFLUSH,                       SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_REPORT_PERF_COUNT,             SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 1,
			.mask = MI_REPORT_PERF_COUNT_GGTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  GFX_OP_3DSTATE_VF_STATISTICS,     S3D,    F,  1,      S  ),
	CMD(  PIPELINE_SELECT,                  S3D,    F,  1,      S  ),
	CMD(  MEDIA_VFE_STATE,                  S3D,   !F,  0xFFFF, B,
	      .bits = {{
			.offset = 2,
			.mask = MEDIA_VFE_STATE_MMIO_ACCESS_MASK,
			.expected = 0,
	      }},						       ),
	CMD(  GPGPU_OBJECT,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GPGPU_WALKER,                     S3D,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_SO_DECL_LIST,      S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_PIPE_CONTROL(5),           S3D,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 1,
			.mask = (PIPE_CONTROL_MMIO_WRITE | PIPE_CONTROL_NOTIFY),
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = (PIPE_CONTROL_GLOBAL_GTT_IVB |
				 PIPE_CONTROL_STORE_DATA_INDEX),
			.expected = 0,
			.condition_offset = 1,
			.condition_mask = PIPE_CONTROL_POST_SYNC_OP_MASK,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
	CMD(  MI_SET_PREDICATE,                 SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTROL,                    SMI,    F,  1,      S  ),
	CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
	CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
	CMD(  MI_LOAD_URB_MEM,                  SMI,   !F,  0xFF,   S  ),
	CMD(  MI_STORE_URB_MEM,                 SMI,   !F,  0xFF,   S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_VS,  S3D,   !F,  0x7FF,  S  ),
	CMD(  GFX_OP_3DSTATE_DX9_CONSTANTF_PS,  S3D,   !F,  0x7FF,  S  ),

	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_VS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_GS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_HS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_DS,  S3D,   !F,  0x1FF,  S  ),
	CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
};

static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	/*
	 * MFX_WAIT doesn't fit the way we handle length for most commands.
	 * It has a length field but it uses a non-standard length bias.
	 * It is always 1 dword though, so just treat it as fixed length.
	 */
	CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
};

static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
	CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  MI_CONDITIONAL_BATCH_BUFFER_END,  SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
};

static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
	CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
	      .bits = {{
			.offset = 0,
			.mask = MI_GLOBAL_GTT,
			.expected = 0,
	      }},						       ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3F,   R  ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_FLUSH_DW_NOTIFY,
			.expected = 0,
	      },
	      {
			.offset = 1,
			.mask = MI_FLUSH_DW_USE_GTT,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      },
	      {
			.offset = 0,
			.mask = MI_FLUSH_DW_STORE_INDEX,
			.expected = 0,
			.condition_offset = 0,
			.condition_mask = MI_FLUSH_DW_OP_MASK,
	      }},						       ),
	CMD(  COLOR_BLT,                        S2D,   !F,  0x3F,   S  ),
	CMD(  SRC_COPY_BLT,                     S2D,   !F,  0x3F,   S  ),
};

static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
};

/*
 * For Gen9 we can still rely on the h/w to enforce cmd security, and only
 * need to re-enforce the register access checks. We therefore only need to
 * teach the cmdparser how to find the end of each command, and identify
 * register accesses. The table doesn't need to reject any commands, and so
 * the only commands listed here are:
 *   1) Those that touch registers
 *   2) Those that do not have the default 8-bit length
 *
 * Note that the default MI length mask chosen for this table is 0xFF, not
 * the 0x3F used on older devices. This is because the vast majority of MI
 * cmds on Gen9 use a standard 8-bit Length field.
 * All the Gen9 blitter instructions are standard 0xFF length mask, and
 * none allow access to non-general registers, so in fact no BLT cmds are
 * included in the table at all.
 */
static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
	CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
	CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      S  ),
	CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      S  ),
	CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
	CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
	CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
	CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      S  ),
	CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
	CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   S  ),
	CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   S  ),
	CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  S  ),
	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
	CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3FF,  S  ),
	CMD(  MI_STORE_REGISTER_MEM_GEN8,       SMI,    F,  4,      W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   S  ),
	CMD(  MI_LOAD_REGISTER_MEM_GEN8,        SMI,    F,  4,      W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
	CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   W,
	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),

	/*
	 * We allow BB_START but apply further checks. We just sanitize the
	 * basic fields here.
	 */
#define MI_BB_START_OPERAND_MASK   GENMASK(SMI-1, 0)
#define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
	CMD(  MI_BATCH_BUFFER_START_GEN8,       SMI,   !F,  0xFF,   B,
	      .bits = {{
			.offset = 0,
			.mask = MI_BB_START_OPERAND_MASK,
			.expected = MI_BB_START_OPERAND_EXPECT,
	      }},						       ),
};
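
/*
 * Illustrative reading of the expected value above: with SMI == 23,
 * MI_BB_START_OPERAND_MASK covers bits 22:0 of the header, so a Gen9
 * BB_START passes only if its PPGTT select bit (MI_BATCH_PPGTT_HSW) is set
 * and its DWord Length field is 1 (the canonical 3-dword form), with every
 * other operand bit clear.
 */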

static const struct drm_i915_cmd_descriptor noop_desc =
	CMD(MI_NOOP, SMI, F, 1, S);

#undef CMD
#undef SMI
#undef S3D
#undef S2D
#undef SMFX
#undef F
#undef S
#undef R
#undef W
#undef B

static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
};

static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
	{ hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
};

static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
};

static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
};

static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
};

static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
	{ gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
	{ gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
	{ hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
};

static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
	{ gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
};

/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr. If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
	i915_reg_t addr;
	u32 mask;
	u32 value;
};

/* Convenience macro for adding 32-bit registers. */
#define REG32(_reg, ...) \
	{ .addr = (_reg), __VA_ARGS__ }

/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
#define REG64(_reg) \
	{ .addr = _reg }, \
	{ .addr = _reg ## _UDW }

#define REG64_IDX(_reg, idx) \
	{ .addr = _reg(idx) }, \
	{ .addr = _reg ## _UDW(idx) }
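
/*
 * For example, REG64(PS_DEPTH_COUNT) expands to the two entries
 * { .addr = PS_DEPTH_COUNT }, { .addr = PS_DEPTH_COUNT_UDW },
 * whitelisting both 32-bit halves of the 64-bit counter.
 */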

static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
	REG64(GPGPU_THREADS_DISPATCHED),
	REG64(HS_INVOCATION_COUNT),
	REG64(DS_INVOCATION_COUNT),
	REG64(IA_VERTICES_COUNT),
	REG64(IA_PRIMITIVES_COUNT),
	REG64(VS_INVOCATION_COUNT),
	REG64(GS_INVOCATION_COUNT),
	REG64(GS_PRIMITIVES_COUNT),
	REG64(CL_INVOCATION_COUNT),
	REG64(CL_PRIMITIVES_COUNT),
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
	REG64(MI_PREDICATE_SRC0),
	REG64(MI_PREDICATE_SRC1),
	REG32(GEN7_3DPRIM_END_OFFSET),
	REG32(GEN7_3DPRIM_START_VERTEX),
	REG32(GEN7_3DPRIM_VERTEX_COUNT),
	REG32(GEN7_3DPRIM_INSTANCE_COUNT),
	REG32(GEN7_3DPRIM_START_INSTANCE),
	REG32(GEN7_3DPRIM_BASE_VERTEX),
	REG32(GEN7_GPGPU_DISPATCHDIMX),
	REG32(GEN7_GPGPU_DISPATCHDIMY),
	REG32(GEN7_GPGPU_DISPATCHDIMZ),
	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 0),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 1),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 2),
	REG64_IDX(GEN7_SO_NUM_PRIMS_WRITTEN, 3),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 0),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 1),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 2),
	REG64_IDX(GEN7_SO_PRIM_STORAGE_NEEDED, 3),
	REG32(GEN7_SO_WRITE_OFFSET(0)),
	REG32(GEN7_SO_WRITE_OFFSET(1)),
	REG32(GEN7_SO_WRITE_OFFSET(2)),
	REG32(GEN7_SO_WRITE_OFFSET(3)),
	REG32(GEN7_L3SQCREG1),
	REG32(GEN7_L3CNTLREG2),
	REG32(GEN7_L3CNTLREG3),
	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};

static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
	REG64_IDX(HSW_CS_GPR, 0),
	REG64_IDX(HSW_CS_GPR, 1),
	REG64_IDX(HSW_CS_GPR, 2),
	REG64_IDX(HSW_CS_GPR, 3),
	REG64_IDX(HSW_CS_GPR, 4),
	REG64_IDX(HSW_CS_GPR, 5),
	REG64_IDX(HSW_CS_GPR, 6),
	REG64_IDX(HSW_CS_GPR, 7),
	REG64_IDX(HSW_CS_GPR, 8),
	REG64_IDX(HSW_CS_GPR, 9),
	REG64_IDX(HSW_CS_GPR, 10),
	REG64_IDX(HSW_CS_GPR, 11),
	REG64_IDX(HSW_CS_GPR, 12),
	REG64_IDX(HSW_CS_GPR, 13),
	REG64_IDX(HSW_CS_GPR, 14),
	REG64_IDX(HSW_CS_GPR, 15),
	REG32(HSW_SCRATCH1,
	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
	      .value = 0),
	REG32(HSW_ROW_CHICKEN3,
	      .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
			HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	      .value = 0),
};

static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
	REG32(BCS_SWCTRL),
	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
};

static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
	REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
	REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
	REG32(BCS_SWCTRL),
	REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
	REG64_IDX(BCS_GPR, 0),
	REG64_IDX(BCS_GPR, 1),
	REG64_IDX(BCS_GPR, 2),
	REG64_IDX(BCS_GPR, 3),
	REG64_IDX(BCS_GPR, 4),
	REG64_IDX(BCS_GPR, 5),
	REG64_IDX(BCS_GPR, 6),
	REG64_IDX(BCS_GPR, 7),
	REG64_IDX(BCS_GPR, 8),
	REG64_IDX(BCS_GPR, 9),
	REG64_IDX(BCS_GPR, 10),
	REG64_IDX(BCS_GPR, 11),
	REG64_IDX(BCS_GPR, 12),
	REG64_IDX(BCS_GPR, 13),
	REG64_IDX(BCS_GPR, 14),
	REG64_IDX(BCS_GPR, 15),
};

#undef REG64
#undef REG32

struct drm_i915_reg_table {
	const struct drm_i915_reg_descriptor *regs;
	int num_regs;
};

static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
};

static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};

static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
	{ gen7_render_regs, ARRAY_SIZE(gen7_render_regs) },
	{ hsw_render_regs, ARRAY_SIZE(hsw_render_regs) },
};

static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
	{ gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs) },
};

static const struct drm_i915_reg_table gen9_blt_reg_tables[] = {
	{ gen9_blt_regs, ARRAY_SIZE(gen9_blt_regs) },
};

static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT)
			return 0xFFFF;
		else
			return 0xFF;
	}

	DRM_DEBUG("CMD: Abnormal rcs cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
	u32 subclient =
		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_RC_CLIENT) {
		if (subclient == INSTR_MEDIA_SUBCLIENT) {
			if (op == 6)
				return 0xFFFF;
			else
				return 0xFFF;
		} else
			return 0xFF;
	}

	DRM_DEBUG("CMD: Abnormal bsd cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT)
		return 0x3F;
	else if (client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}

static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
{
	u32 client = cmd_header >> INSTR_CLIENT_SHIFT;

	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
		return 0xFF;

	DRM_DEBUG("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
	return 0;
}
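
/*
 * Illustrative note: a non-zero mask returned by these vfuncs selects the
 * DWord Length field of the header; the parser computes the total command
 * size as (header & mask) + LENGTH_BIAS dwords (see
 * intel_engine_cmd_parser() below). Returning 0 marks the header as
 * undecodable and aborts the scan.
 */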

static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
				 const struct drm_i915_cmd_table *cmd_tables,
				 int cmd_table_count)
{
	int i;
	bool ret = true;

	if (!cmd_tables || cmd_table_count == 0)
		return true;

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];
		u32 previous = 0;
		int j;

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			u32 curr = desc->cmd.value & desc->cmd.mask;

			if (curr < previous) {
				DRM_ERROR("CMD: %s [%d] command table not sorted: "
					  "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
					  engine->name, engine->id,
					  i, j, curr, previous);
				ret = false;
			}

			previous = curr;
		}
	}

	return ret;
}

static bool check_sorted(const struct intel_engine_cs *engine,
			 const struct drm_i915_reg_descriptor *reg_table,
			 int reg_count)
{
	int i;
	u32 previous = 0;
	bool ret = true;

	for (i = 0; i < reg_count; i++) {
		u32 curr = i915_mmio_reg_offset(reg_table[i].addr);

		if (curr < previous) {
			DRM_ERROR("CMD: %s [%d] register table not sorted: "
				  "entry=%d reg=0x%08X prev=0x%08X\n",
				  engine->name, engine->id,
				  i, curr, previous);
			ret = false;
		}

		previous = curr;
	}

	return ret;
}

static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
	int i;
	const struct drm_i915_reg_table *table;

	for (i = 0; i < engine->reg_table_count; i++) {
		table = &engine->reg_tables[i];
		if (!check_sorted(engine, table->regs, table->num_regs))
			return false;
	}

	return true;
}

struct cmd_node {
	const struct drm_i915_cmd_descriptor *desc;
	struct hlist_node node;
};

/*
 * Different command ranges have different numbers of bits for the opcode. For
 * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The
 * problem is that, for example, MI commands use bits 22:16 for other fields
 * such as GGTT vs PPGTT bits. If we include those bits in the mask then when
 * we mask a command from a batch it could hash to the wrong bucket due to
 * non-opcode bits being set. But if we don't include those bits, some 3D
 * commands may hash to the same bucket due to not including opcode bits that
 * make the command unique. For now, we will risk hashing to the same bucket.
 */
static inline u32 cmd_header_key(u32 x)
{
	switch (x >> INSTR_CLIENT_SHIFT) {
	default:
	case INSTR_MI_CLIENT:
		return x >> STD_MI_OPCODE_SHIFT;
	case INSTR_RC_CLIENT:
		return x >> STD_3D_OPCODE_SHIFT;
	case INSTR_BC_CLIENT:
		return x >> STD_2D_OPCODE_SHIFT;
	}
}
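
/*
 * Example (illustrative): MI_STORE_DWORD_IMM carries MI opcode 0x20 in bits
 * 28:23, so every variant of it hashes to key 0x20 regardless of its
 * GGTT/PPGTT operand bits, while RCS (3D) headers hash on bits 31:16.
 */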

static int init_hash_table(struct intel_engine_cs *engine,
			   const struct drm_i915_cmd_table *cmd_tables,
			   int cmd_table_count)
{
	int i, j;

	hash_init(engine->cmd_hash);

	for (i = 0; i < cmd_table_count; i++) {
		const struct drm_i915_cmd_table *table = &cmd_tables[i];

		for (j = 0; j < table->count; j++) {
			const struct drm_i915_cmd_descriptor *desc =
				&table->table[j];
			struct cmd_node *desc_node =
				kmalloc(sizeof(*desc_node), GFP_KERNEL);

			if (!desc_node)
				return -ENOMEM;

			desc_node->desc = desc;
			hash_add(engine->cmd_hash, &desc_node->node,
				 cmd_header_key(desc->cmd.value));
		}
	}

	return 0;
}

static void fini_hash_table(struct intel_engine_cs *engine)
{
	struct hlist_node *tmp;
	struct cmd_node *desc_node;
	int i;

	hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
		hash_del(&desc_node->node);
		kfree(desc_node);
	}
}

/**
 * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine
 * @engine: the engine to initialize
 *
 * Optionally initializes fields related to batch buffer command parsing in the
 * struct intel_engine_cs based on whether the platform requires software
 * command parsing.
 */
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;
	int ret;

	if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
					  engine->class == COPY_ENGINE_CLASS))
		return;

	switch (engine->class) {
	case RENDER_CLASS:
		if (IS_HASWELL(engine->i915)) {
			cmd_tables = hsw_render_ring_cmd_table;
			cmd_table_count =
				ARRAY_SIZE(hsw_render_ring_cmd_table);
		} else {
			cmd_tables = gen7_render_cmd_table;
			cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
		}

		if (IS_HASWELL(engine->i915)) {
			engine->reg_tables = hsw_render_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
		} else {
			engine->reg_tables = ivb_render_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
		}
		engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
		break;
	case VIDEO_DECODE_CLASS:
		cmd_tables = gen7_video_cmd_table;
		cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	case COPY_ENGINE_CLASS:
		engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
		if (IS_GEN(engine->i915, 9)) {
			cmd_tables = gen9_blt_cmd_table;
			cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
			engine->get_cmd_length_mask =
				gen9_blt_get_cmd_length_mask;

			/* BCS Engine unsafe without parser */
			engine->flags |= I915_ENGINE_REQUIRES_CMD_PARSER;
		} else if (IS_HASWELL(engine->i915)) {
			cmd_tables = hsw_blt_ring_cmd_table;
			cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
		} else {
			cmd_tables = gen7_blt_cmd_table;
			cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
		}

		if (IS_GEN(engine->i915, 9)) {
			engine->reg_tables = gen9_blt_reg_tables;
			engine->reg_table_count =
				ARRAY_SIZE(gen9_blt_reg_tables);
		} else if (IS_HASWELL(engine->i915)) {
			engine->reg_tables = hsw_blt_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
		} else {
			engine->reg_tables = ivb_blt_reg_tables;
			engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
		}
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		cmd_tables = hsw_vebox_cmd_table;
		cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
		/* VECS can use the same length_mask function as VCS */
		engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
		break;
	default:
		MISSING_CASE(engine->class);
		return;
	}

	if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
		DRM_ERROR("%s: command descriptions are not sorted\n",
			  engine->name);
		return;
	}
	if (!validate_regs_sorted(engine)) {
		DRM_ERROR("%s: registers are not sorted\n", engine->name);
		return;
	}

	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
	if (ret) {
		DRM_ERROR("%s: initialisation failed!\n", engine->name);
		fini_hash_table(engine);
		return;
	}

	engine->flags |= I915_ENGINE_USING_CMD_PARSER;
}

/**
 * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields
 * @engine: the engine to clean up
 *
 * Releases any resources related to command parsing that may have been
 * initialized for the specified engine.
 */
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{
	if (!intel_engine_using_cmd_parser(engine))
		return;

	fini_hash_table(engine);
}

static const struct drm_i915_cmd_descriptor*
find_cmd_in_table(struct intel_engine_cs *engine,
		  u32 cmd_header)
{
	struct cmd_node *desc_node;

	hash_for_each_possible(engine->cmd_hash, desc_node, node,
			       cmd_header_key(cmd_header)) {
		const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
		if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
			return desc;
	}

	return NULL;
}

/*
 * Returns a pointer to a descriptor for the command specified by cmd_header.
 *
 * The caller must supply space for a default descriptor via the default_desc
 * parameter. If no descriptor for the specified command exists in the engine's
 * command parser tables, this function fills in default_desc based on the
 * engine's default length encoding and returns default_desc.
 */
static const struct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *engine,
	 u32 cmd_header,
	 const struct drm_i915_cmd_descriptor *desc,
	 struct drm_i915_cmd_descriptor *default_desc)
{
	u32 mask;

	if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0)
		return desc;

	desc = find_cmd_in_table(engine, cmd_header);
	if (desc)
		return desc;

	mask = engine->get_cmd_length_mask(cmd_header);
	if (!mask)
		return NULL;

	default_desc->cmd.value = cmd_header;
	default_desc->cmd.mask = ~0u << MIN_OPCODE_SHIFT;
	default_desc->length.mask = mask;
	default_desc->flags = CMD_DESC_SKIP;
	return default_desc;
}

static const struct drm_i915_reg_descriptor *
__find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr)
{
	int start = 0, end = count;
	while (start < end) {
		int mid = start + (end - start) / 2;
		int ret = addr - i915_mmio_reg_offset(table[mid].addr);
		if (ret < 0)
			end = mid;
		else if (ret > 0)
			start = mid + 1;
		else
			return &table[mid];
	}
	return NULL;
}
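
/*
 * Usage sketch (illustrative): the tables are validated as sorted by
 * check_sorted(), so e.g.
 *
 *	__find_reg(gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs),
 *		   i915_mmio_reg_offset(BCS_SWCTRL))
 *
 * binary-searches the blitter whitelist and returns its BCS_SWCTRL entry.
 */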

static const struct drm_i915_reg_descriptor *
find_reg(const struct intel_engine_cs *engine, u32 addr)
{
	const struct drm_i915_reg_table *table = engine->reg_tables;
	const struct drm_i915_reg_descriptor *reg = NULL;
	int count = engine->reg_table_count;

	for (; !reg && (count > 0); ++table, --count)
		reg = __find_reg(table->regs, table->num_regs, addr);

	return reg;
}

/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */
static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
		       struct drm_i915_gem_object *src_obj,
		       u32 offset, u32 length)
{
	bool needs_clflush;
	void *dst, *src;
	int ret;

	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
	if (IS_ERR(dst))
		return dst;

	ret = i915_gem_object_pin_pages(src_obj);
	if (ret) {
		i915_gem_object_unpin_map(dst_obj);
		return ERR_PTR(ret);
	}

	needs_clflush =
		!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);

	src = ERR_PTR(-ENODEV);
	if (needs_clflush && i915_has_memcpy_from_wc()) {
		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
		if (!IS_ERR(src)) {
			i915_unaligned_memcpy_from_wc(dst,
						      src + offset,
						      length);
			i915_gem_object_unpin_map(src_obj);
		}
	}
	if (IS_ERR(src)) {
		void *ptr;
		int x, n;

		/*
		 * We can avoid clflushing partial cachelines before the write
		 * if we only ever write full cache-lines. Since we know that
		 * both the source and destination are in multiples of
		 * PAGE_SIZE, we can simply round up to the next cacheline.
		 * We don't care about copying too much here as we only
		 * validate up to the end of the batch.
		 */
		if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			length = round_up(length,
					  boot_cpu_data.x86_clflush_size);

		ptr = dst;
		x = offset_in_page(offset);
		for (n = offset >> PAGE_SHIFT; length; n++) {
			int len = min_t(int, length, PAGE_SIZE - x);

			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
			if (needs_clflush)
				drm_clflush_virt_range(src + x, len);
			memcpy(ptr, src + x, len);
			kunmap_atomic(src);

			ptr += len;
			length -= len;
			x = 0;
		}
	}

	i915_gem_object_unpin_pages(src_obj);

	/* dst_obj is returned with vmap pinned */
	return dst;
}

static bool check_cmd(const struct intel_engine_cs *engine,
		      const struct drm_i915_cmd_descriptor *desc,
		      const u32 *cmd, u32 length)
{
	if (desc->flags & CMD_DESC_SKIP)
		return true;

	if (desc->flags & CMD_DESC_REJECT) {
		DRM_DEBUG("CMD: Rejected command: 0x%08X\n", *cmd);
		return false;
	}

	if (desc->flags & CMD_DESC_REGISTER) {
		/*
		 * Get the distance between individual register offset
		 * fields if the command can perform more than one
		 * access at a time.
		 */
		const u32 step = desc->reg.step ? desc->reg.step : length;
		u32 offset;

		for (offset = desc->reg.offset; offset < length;
		     offset += step) {
			const u32 reg_addr = cmd[offset] & desc->reg.mask;
			const struct drm_i915_reg_descriptor *reg =
				find_reg(engine, reg_addr);

			if (!reg) {
				DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
					  reg_addr, *cmd, engine->name);
				return false;
			}

			/*
			 * Check the value written to the register against the
			 * allowed mask/value pair given in the whitelist entry.
			 */
			if (reg->mask) {
				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
					DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
						  reg_addr);
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
					DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
						  reg_addr);
					return false;
				}

				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
				    (offset + 2 > length ||
				     (cmd[offset + 1] & reg->mask) != reg->value)) {
					DRM_DEBUG("CMD: Rejected LRI to masked register 0x%08X\n",
						  reg_addr);
					return false;
				}
			}
		}
	}

	if (desc->flags & CMD_DESC_BITMASK) {
		int i;

		for (i = 0; i < MAX_CMD_DESC_BITMASKS; i++) {
			u32 dword;

			if (desc->bits[i].mask == 0)
				break;

			if (desc->bits[i].condition_mask != 0) {
				u32 offset =
					desc->bits[i].condition_offset;
				u32 condition = cmd[offset] &
					desc->bits[i].condition_mask;

				if (condition == 0)
					continue;
			}

			if (desc->bits[i].offset >= length) {
				DRM_DEBUG("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
					  *cmd, engine->name);
				return false;
			}

			dword = cmd[desc->bits[i].offset] &
				desc->bits[i].mask;

			if (dword != desc->bits[i].expected) {
				DRM_DEBUG("CMD: Rejected command 0x%08X for bitmask 0x%08X (exp=0x%08X act=0x%08X) (%s)\n",
					  *cmd,
					  desc->bits[i].mask,
					  desc->bits[i].expected,
					  dword, engine->name);
				return false;
			}
		}
	}

	return true;
}

static int check_bbstart(u32 *cmd, u32 offset, u32 length,
			 u32 batch_length,
			 u64 batch_addr,
			 u64 shadow_addr,
			 const unsigned long *jump_whitelist)
{
	u64 jump_offset, jump_target;
	u32 target_cmd_offset, target_cmd_index;

	/* For igt compatibility on older platforms */
	if (!jump_whitelist) {
		DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
		return -EACCES;
	}

	if (length != 3) {
		DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
			  length);
		return -EINVAL;
	}

	jump_target = *(u64 *)(cmd + 1);
	jump_offset = jump_target - batch_addr;

	/*
	 * Any underflow of jump_target is guaranteed to be outside the range
	 * of a u32, so >= test catches both too large and too small
	 */
	if (jump_offset >= batch_length) {
		DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
			  jump_target);
		return -EINVAL;
	}

	/*
	 * This cannot overflow a u32 because we already checked jump_offset
	 * is within the BB, and the batch_length is a u32
	 */
	target_cmd_offset = lower_32_bits(jump_offset);
	target_cmd_index = target_cmd_offset / sizeof(u32);

	*(u64 *)(cmd + 1) = shadow_addr + target_cmd_offset;

	if (target_cmd_index == offset)
		return 0;

	if (IS_ERR(jump_whitelist))
		return PTR_ERR(jump_whitelist);

	if (!test_bit(target_cmd_index, jump_whitelist)) {
		DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
			  jump_target);
		return -EINVAL;
	}

	return 0;
}
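
/*
 * Worked example with made-up addresses: if batch_addr is 0x10000 and a
 * BB_START operand is 0x10040, then jump_offset is 0x40 and
 * target_cmd_index is 0x10; the branch is patched to point into the shadow
 * and allowed only if dword index 0x10 was recorded in jump_whitelist as
 * the start of a previously validated command.
 */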

static unsigned long *alloc_whitelist(u32 batch_length)
{
	unsigned long *jmp;

	/*
	 * We expect batch_length to be less than 256KiB for known users,
	 * i.e. we need at most an 8KiB bitmap allocation which should be
	 * reasonably cheap due to kmalloc caches.
	 */

	/* Prefer to report transient allocation failure rather than hit oom */
	jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
			    GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!jmp)
		return ERR_PTR(-ENOMEM);

	return jmp;
}
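
/*
 * Sizing example (illustrative): a 4KiB batch holds 1024 dwords and so
 * needs a 1024-bit (128 byte) bitmap; the expected 256KiB worst case noted
 * above is still only an 8KiB allocation.
 */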

#define LENGTH_BIAS 2

static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

/**
 * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
 * @engine: the engine on which the batch is to execute
 * @batch: the batch buffer in question
 * @batch_offset: byte offset in the batch at which execution starts
 * @batch_length: length of the commands in batch_obj
 * @shadow: validated copy of the batch buffer in question
 * @trampoline: whether to emit a conditional trampoline at the end of the batch
 *
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct i915_vma *batch,
			    u32 batch_offset,
			    u32 batch_length,
			    struct i915_vma *shadow,
			    bool trampoline)
{
	u32 *cmd, *batch_end, offset = 0;
	struct drm_i915_cmd_descriptor default_desc = noop_desc;
	const struct drm_i915_cmd_descriptor *desc = &default_desc;
	unsigned long *jump_whitelist;
	u64 batch_addr, shadow_addr;
	int ret = 0;

	GEM_BUG_ON(!IS_ALIGNED(batch_offset, sizeof(*cmd)));
	GEM_BUG_ON(!IS_ALIGNED(batch_length, sizeof(*cmd)));
	GEM_BUG_ON(range_overflows_t(u64, batch_offset, batch_length,
				     batch->size));
	GEM_BUG_ON(!batch_length);

	cmd = copy_batch(shadow->obj, batch->obj, batch_offset, batch_length);
	if (IS_ERR(cmd)) {
		DRM_DEBUG("CMD: Failed to copy batch\n");
		return PTR_ERR(cmd);
	}

	jump_whitelist = NULL;
	if (!trampoline)
		/* Defer failure until attempted use */
		jump_whitelist = alloc_whitelist(batch_length);

	shadow_addr = gen8_canonical_addr(shadow->node.start);
	batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);

	/*
	 * We use the batch length as size because the shadow object is as
	 * large or larger and copy_batch() will write MI_NOPs to the extra
	 * space. Parsing should be faster in some cases this way.
	 */
	batch_end = cmd + batch_length / sizeof(*batch_end);
	do {
		u32 length;

		if (*cmd == MI_BATCH_BUFFER_END)
			break;

		desc = find_cmd(engine, *cmd, desc, &default_desc);
		if (!desc) {
			DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
			ret = -EINVAL;
			break;
		}

		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
			length = (*cmd & desc->length.mask) + LENGTH_BIAS;

		if ((batch_end - cmd) < length) {
			DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
				  *cmd,
				  length,
				  batch_end - cmd);
			ret = -EINVAL;
			break;
		}

		if (!check_cmd(engine, desc, cmd, length)) {
			ret = -EACCES;
			break;
		}

		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
			ret = check_bbstart(cmd, offset, length, batch_length,
					    batch_addr, shadow_addr,
					    jump_whitelist);
			break;
		}

		if (!IS_ERR_OR_NULL(jump_whitelist))
			__set_bit(offset, jump_whitelist);

		cmd += length;
		offset += length;
		if (cmd >= batch_end) {
			DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
			ret = -EINVAL;
			break;
		}
	} while (1);

	if (trampoline) {
		/*
		 * With the trampoline, the shadow is executed twice.
		 *
		 *   1 - starting at offset 0, in privileged mode
		 *   2 - starting at offset batch_len, as non-privileged
		 *
		 * Only if the batch is valid and safe to execute, do we
		 * allow the first privileged execution to proceed. If not,
		 * we terminate the first batch and use the second batchbuffer
		 * entry to chain to the original unsafe non-privileged batch,
		 * leaving it to the HW to validate.
		 */
		*batch_end = MI_BATCH_BUFFER_END;

		if (ret) {
			/* Batch unsafe to execute with privileges, cancel! */
			cmd = page_mask_bits(shadow->obj->mm.mapping);
			*cmd = MI_BATCH_BUFFER_END;

			/* If batch is unsafe but valid, jump to the original */
			if (ret == -EACCES) {
				unsigned int flags;

				flags = MI_BATCH_NON_SECURE_I965;
				if (IS_HASWELL(engine->i915))
					flags = MI_BATCH_NON_SECURE_HSW;

				GEM_BUG_ON(!IS_GEN_RANGE(engine->i915, 6, 7));
				__gen6_emit_bb_start(batch_end,
						     batch_addr,
						     flags);

				ret = 0; /* allow execution */
			}
		}

		if (shadow_needs_clflush(shadow->obj))
			drm_clflush_virt_range(batch_end, 8);
	}

	if (shadow_needs_clflush(shadow->obj)) {
		void *ptr = page_mask_bits(shadow->obj->mm.mapping);

		drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
	}

	if (!IS_ERR_OR_NULL(jump_whitelist))
		kfree(jump_whitelist);
	i915_gem_object_unpin_map(shadow->obj);
	return ret;
}

/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 * @dev_priv: i915 device private
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	bool active = false;

	/* If the command parser is not enabled, report 0 - unsupported */
	for_each_uabi_engine(engine, dev_priv) {
		if (intel_engine_using_cmd_parser(engine)) {
			active = true;
			break;
		}
	}
	if (!active)
		return 0;

	/*
	 * Command parser version history
	 *
	 * 1. Initial version. Checks batches and reports violations, but leaves
	 *    hardware parsing enabled (so does not allow new use cases).
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
	 * 5. GPGPU dispatch compute indirect registers.
	 * 6. TIMESTAMP register and Haswell CS GPR registers.
	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
	 * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
	 *    rely on the HW to NOOP disallowed commands as it would without
	 *    the parser enabled.
	 * 9. Don't whitelist or handle oacontrol specially, as ownership
	 *    for oacontrol state is moving to i915-perf.
	 * 10. Support for Gen9 BCS Parsing
	 */
	return 10;
}