/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * DOC: Command list validator for VC4.
 *
 * Since the VC4 has no IOMMU between it and system memory, a user
 * with access to execute command lists could escalate privilege by
 * overwriting system memory (drawing to it as a framebuffer) or
 * reading system memory it shouldn't (reading it as a vertex buffer
 * or index buffer).
 *
 * We validate binner command lists to ensure that all accesses are
 * within the bounds of the GEM objects referenced by the submitted
 * job.  It explicitly whitelists packets, and looks at the offsets in
 * any address fields to make sure they're contained within the BOs
 * they reference.
 *
 * Note that because CL validation is already reading the
 * user-submitted CL and writing the validated copy out to the memory
 * that the GPU will actually read, this is also where GEM relocation
 * processing (turning BO references into actual addresses for the GPU
 * to use) happens.
 */

#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_packet.h"

#define VALIDATE_ARGS \
	struct vc4_exec_info *exec,			\
	void *validated,				\
	void *untrusted

/** Return the width in pixels of a 64-byte microtile. */
static uint32_t
utile_width(int cpp)
{
	switch (cpp) {
	case 1:
	case 2:
		return 8;
	case 4:
		return 4;
	case 8:
		return 2;
	default:
		DRM_ERROR("unknown cpp: %d\n", cpp);
		return 1;
	}
}

/** Return the height in pixels of a 64-byte microtile. */
static uint32_t
utile_height(int cpp)
{
	switch (cpp) {
	case 1:
		return 8;
	case 2:
	case 4:
	case 8:
		return 4;
	default:
		DRM_ERROR("unknown cpp: %d\n", cpp);
		return 1;
	}
}

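/* Worked example (illustrative, not from the upstream comments): a
 * microtile is always 64 bytes, so at cpp = 2 it is 8 pixels wide by
 * 4 tall (8 * 4 * 2 == 64), while at cpp = 4 it is 4x4 (4 * 4 * 4 == 64).
 */
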
/**
 * size_is_lt() - Returns whether a miplevel of the given size will
 * use the lineartile (LT) tiling layout rather than the normal T
 * layout.
 * @width: Width in pixels of the miplevel
 * @height: Height in pixels of the miplevel
 * @cpp: Bytes per pixel of the pixel format
 */
static bool
size_is_lt(uint32_t width, uint32_t height, int cpp)
{
	return (width <= 4 * utile_width(cpp) ||
		height <= 4 * utile_height(cpp));
}

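/* Worked example (illustrative): at cpp = 4 a utile is 4x4 pixels, so
 * any miplevel with width <= 16 or height <= 16 uses LT tiling; a 64x8
 * level is LT, while a 64x64 level stays in the normal T layout.
 */
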
struct drm_gem_cma_object *
vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
{
	struct drm_gem_cma_object *obj;
	struct vc4_bo *bo;

	if (hindex >= exec->bo_count) {
		DRM_ERROR("BO index %d greater than BO count %d\n",
			  hindex, exec->bo_count);
		return NULL;
	}
	obj = exec->bo[hindex];
	bo = to_vc4_bo(&obj->base);

	if (bo->validated_shader) {
		DRM_ERROR("Trying to use shader BO as something other than "
			  "a shader\n");
		return NULL;
	}

	return obj;
}

static struct drm_gem_cma_object *
vc4_use_handle(struct vc4_exec_info *exec, uint32_t gem_handles_packet_index)
{
	return vc4_use_bo(exec, exec->bo_index[gem_handles_packet_index]);
}

static bool
validate_bin_pos(struct vc4_exec_info *exec, void *untrusted, uint32_t pos)
{
	/* Note that the untrusted pointer passed to these functions is
	 * incremented past the packet byte.
	 */
	return (untrusted - 1 == exec->bin_u + pos);
}

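/* Worked example (illustrative): when the final byte of a bin CL of
 * bin_cl_size bytes is a one-byte packet, its handler sees untrusted ==
 * exec->bin_u + bin_cl_size, so validate_bin_pos(exec, untrusted,
 * bin_cl_size - 1) is true exactly when that packet sits at the very
 * end of the stream.
 */
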
static uint32_t
gl_shader_rec_size(uint32_t pointer_bits)
{
	uint32_t attribute_count = pointer_bits & 7;
	bool extended = pointer_bits & 8;

	if (attribute_count == 0)
		attribute_count = 8;

	if (extended)
		return 100 + attribute_count * 4;
	else
		return 36 + attribute_count * 8;
}

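/* Worked example (illustrative): a non-extended record with all eight
 * attributes enabled encodes attribute_count as 0, which is promoted
 * back to 8 above, giving 36 + 8 * 8 = 100 bytes; the extended form of
 * the same record is 100 + 8 * 4 = 132 bytes.
 */
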
static bool
vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo,
		   uint32_t offset, uint8_t tiling_format,
		   uint32_t width, uint32_t height, uint8_t cpp)
{
	uint32_t aligned_width, aligned_height, stride, size;
	uint32_t utile_w = utile_width(cpp);
	uint32_t utile_h = utile_height(cpp);

	/* The shaded vertex format stores signed 12.4 fixed point
	 * (-2048,2047) offsets from the viewport center, so we should
	 * never have a render target larger than 4096.  The texture
	 * unit can only sample from 2048x2048, so it's even more
	 * restricted.  This lets us avoid worrying about overflow in
	 * our math.
	 */
	if (width > 4096 || height > 4096) {
		DRM_ERROR("Surface dimensions (%d,%d) too large",
			  width, height);
		return false;
	}

	switch (tiling_format) {
	case VC4_TILING_FORMAT_LINEAR:
		aligned_width = round_up(width, utile_w);
		aligned_height = height;
		break;
	case VC4_TILING_FORMAT_T:
		aligned_width = round_up(width, utile_w * 8);
		aligned_height = round_up(height, utile_h * 8);
		break;
	case VC4_TILING_FORMAT_LT:
		aligned_width = round_up(width, utile_w);
		aligned_height = round_up(height, utile_h);
		break;
	default:
		DRM_ERROR("buffer tiling %d unsupported\n", tiling_format);
		return false;
	}

	stride = aligned_width * cpp;
	size = stride * aligned_height;

	if (size + offset < size ||
	    size + offset > fbo->base.size) {
		DRM_ERROR("Overflow in %dx%d (%dx%d) fbo size (%d + %d > %zd)\n",
			  width, height,
			  aligned_width, aligned_height,
			  size, offset, fbo->base.size);
		return false;
	}

	return true;
}

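/* Worked example (illustrative): a 1024x768 RGBA8888 render target
 * (cpp = 4, utiles of 4x4) in T format aligns to 32-pixel boundaries,
 * so aligned_width = 1024 and aligned_height = 768, giving a stride of
 * 4096 bytes and a size of 3145728 bytes that must fit in the BO at
 * the supplied offset.
 */
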
static int
validate_flush(VALIDATE_ARGS)
{
	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 1)) {
		DRM_ERROR("Bin CL must end with VC4_PACKET_FLUSH\n");
		return -EINVAL;
	}
	exec->found_flush = true;

	return 0;
}

static int
validate_start_tile_binning(VALIDATE_ARGS)
{
	if (exec->found_start_tile_binning_packet) {
		DRM_ERROR("Duplicate VC4_PACKET_START_TILE_BINNING\n");
		return -EINVAL;
	}
	exec->found_start_tile_binning_packet = true;

	if (!exec->found_tile_binning_mode_config_packet) {
		DRM_ERROR("missing VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
		return -EINVAL;
	}

	return 0;
}

static int
validate_increment_semaphore(VALIDATE_ARGS)
{
	if (!validate_bin_pos(exec, untrusted, exec->args->bin_cl_size - 2)) {
		DRM_ERROR("Bin CL must end with "
			  "VC4_PACKET_INCREMENT_SEMAPHORE\n");
		return -EINVAL;
	}
	exec->found_increment_semaphore_packet = true;

	return 0;
}

static int
validate_indexed_prim_list(VALIDATE_ARGS)
{
	struct drm_gem_cma_object *ib;
	uint32_t length = *(uint32_t *)(untrusted + 1);
	uint32_t offset = *(uint32_t *)(untrusted + 5);
	uint32_t max_index = *(uint32_t *)(untrusted + 9);
	uint32_t index_size = (*(uint8_t *)(untrusted + 0) >> 4) ? 2 : 1;
	struct vc4_shader_state *shader_state;

	/* Check overflow condition */
	if (exec->shader_state_count == 0) {
		DRM_ERROR("shader state must precede primitives\n");
		return -EINVAL;
	}
	shader_state = &exec->shader_state[exec->shader_state_count - 1];

	if (max_index > shader_state->max_index)
		shader_state->max_index = max_index;

	ib = vc4_use_handle(exec, 0);
	if (!ib)
		return -EINVAL;

	exec->bin_dep_seqno = max(exec->bin_dep_seqno,
				  to_vc4_bo(&ib->base)->write_seqno);

	if (offset > ib->base.size ||
	    (ib->base.size - offset) / index_size < length) {
		DRM_ERROR("IB access overflow (%d + %d*%d > %zd)\n",
			  offset, length, index_size, ib->base.size);
		return -EINVAL;
	}

	*(uint32_t *)(validated + 5) = ib->paddr + offset;

	return 0;
}

static int
validate_gl_array_primitive(VALIDATE_ARGS)
{
	uint32_t length = *(uint32_t *)(untrusted + 1);
	uint32_t base_index = *(uint32_t *)(untrusted + 5);
	uint32_t max_index;
	struct vc4_shader_state *shader_state;

	/* Check overflow condition */
	if (exec->shader_state_count == 0) {
		DRM_ERROR("shader state must precede primitives\n");
		return -EINVAL;
	}
	shader_state = &exec->shader_state[exec->shader_state_count - 1];

	if (length + base_index < length) {
		DRM_ERROR("primitive vertex count overflow\n");
		return -EINVAL;
	}
	max_index = length + base_index - 1;

	if (max_index > shader_state->max_index)
		shader_state->max_index = max_index;

	return 0;
}

static int
validate_gl_shader_state(VALIDATE_ARGS)
{
	uint32_t i = exec->shader_state_count++;

	if (i >= exec->shader_state_size) {
		DRM_ERROR("More requests for shader states than declared\n");
		return -EINVAL;
	}

	exec->shader_state[i].addr = *(uint32_t *)untrusted;
	exec->shader_state[i].max_index = 0;

	if (exec->shader_state[i].addr & ~0xf) {
		DRM_ERROR("high bits set in GL shader rec reference\n");
		return -EINVAL;
	}

	*(uint32_t *)validated = (exec->shader_rec_p +
				  exec->shader_state[i].addr);

	exec->shader_rec_p +=
		roundup(gl_shader_rec_size(exec->shader_state[i].addr), 16);

	return 0;
}

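/* Worked example (illustrative): a shader rec reference with addr = 5
 * encodes five attributes and no extended stride (addr & 7 == 5,
 * addr & 8 == 0), so gl_shader_rec_size() returns 36 + 5 * 8 = 76
 * bytes and shader_rec_p advances by roundup(76, 16) = 80.
 */
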
static int
validate_tile_binning_config(VALIDATE_ARGS)
{
	struct drm_device *dev = exec->exec_bo->base.dev;
	struct vc4_bo *tile_bo;
	uint8_t flags;
	uint32_t tile_state_size, tile_alloc_size;
	uint32_t tile_count;

	if (exec->found_tile_binning_mode_config_packet) {
		DRM_ERROR("Duplicate VC4_PACKET_TILE_BINNING_MODE_CONFIG\n");
		return -EINVAL;
	}
	exec->found_tile_binning_mode_config_packet = true;

	exec->bin_tiles_x = *(uint8_t *)(untrusted + 12);
	exec->bin_tiles_y = *(uint8_t *)(untrusted + 13);
	tile_count = exec->bin_tiles_x * exec->bin_tiles_y;
	flags = *(uint8_t *)(untrusted + 14);

	if (exec->bin_tiles_x == 0 ||
	    exec->bin_tiles_y == 0) {
		DRM_ERROR("Tile binning config of %dx%d too small\n",
			  exec->bin_tiles_x, exec->bin_tiles_y);
		return -EINVAL;
	}

	if (flags & (VC4_BIN_CONFIG_DB_NON_MS |
		     VC4_BIN_CONFIG_TILE_BUFFER_64BIT)) {
		DRM_ERROR("unsupported binning config flags 0x%02x\n", flags);
		return -EINVAL;
	}

	/* The tile state data array is 48 bytes per tile, and we put it at
	 * the start of a BO containing both it and the tile alloc.
	 */
	tile_state_size = 48 * tile_count;

	/* Since the tile alloc array will follow us, align. */
	exec->tile_alloc_offset = roundup(tile_state_size, 4096);

	*(uint8_t *)(validated + 14) =
		((flags & ~(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_MASK |
			    VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_MASK)) |
		 VC4_BIN_CONFIG_AUTO_INIT_TSDA |
		 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE_32,
			       VC4_BIN_CONFIG_ALLOC_INIT_BLOCK_SIZE) |
		 VC4_SET_FIELD(VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE_128,
			       VC4_BIN_CONFIG_ALLOC_BLOCK_SIZE));

	/* Initial block size. */
	tile_alloc_size = 32 * tile_count;

	/*
	 * The initial allocation gets rounded to the next 256 bytes before
	 * the hardware starts fulfilling further allocations.
	 */
	tile_alloc_size = roundup(tile_alloc_size, 256);

	/* Add space for the extra allocations.  This is what gets used first,
	 * before overflow memory.  It must have at least 4096 bytes, but we
	 * want to avoid overflow memory usage if possible.
	 */
	tile_alloc_size += 1024 * 1024;

	tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
				true);
	exec->tile_bo = &tile_bo->base;
	if (IS_ERR(exec->tile_bo))
		return PTR_ERR(exec->tile_bo);
	list_add_tail(&tile_bo->unref_head, &exec->unref_list);

	/* tile alloc address. */
	*(uint32_t *)(validated + 0) = (exec->tile_bo->paddr +
					exec->tile_alloc_offset);
	/* tile alloc size. */
	*(uint32_t *)(validated + 4) = tile_alloc_size;
	/* tile state address. */
	*(uint32_t *)(validated + 8) = exec->tile_bo->paddr;

	return 0;
}

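/* Worked example (illustrative): binning a 1920x1080 frame as 30x17
 * tiles gives tile_count = 510, so the tile state array is 48 * 510 =
 * 24480 bytes and the tile alloc lands at offset 24576 after rounding
 * up to 4096.  The initial tile alloc of 32 * 510 = 16320 bytes rounds
 * up to 16384, before the extra 1MiB of headroom is added.
 */
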
static int
validate_gem_handles(VALIDATE_ARGS)
{
	memcpy(exec->bo_index, untrusted, sizeof(exec->bo_index));
	return 0;
}

#define VC4_DEFINE_PACKET(packet, func) \
	[packet] = { packet ## _SIZE, #packet, func }

static const struct cmd_info {
	uint16_t len;
	const char *name;
	int (*func)(struct vc4_exec_info *exec, void *validated,
		    void *untrusted);
} cmd_info[] = {
	VC4_DEFINE_PACKET(VC4_PACKET_HALT, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_NOP, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_FLUSH, validate_flush),
	VC4_DEFINE_PACKET(VC4_PACKET_FLUSH_ALL, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_START_TILE_BINNING,
			  validate_start_tile_binning),
	VC4_DEFINE_PACKET(VC4_PACKET_INCREMENT_SEMAPHORE,
			  validate_increment_semaphore),

	VC4_DEFINE_PACKET(VC4_PACKET_GL_INDEXED_PRIMITIVE,
			  validate_indexed_prim_list),
	VC4_DEFINE_PACKET(VC4_PACKET_GL_ARRAY_PRIMITIVE,
			  validate_gl_array_primitive),

	VC4_DEFINE_PACKET(VC4_PACKET_PRIMITIVE_LIST_FORMAT, NULL),

	VC4_DEFINE_PACKET(VC4_PACKET_GL_SHADER_STATE, validate_gl_shader_state),

	VC4_DEFINE_PACKET(VC4_PACKET_CONFIGURATION_BITS, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_FLAT_SHADE_FLAGS, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_POINT_SIZE, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_LINE_WIDTH, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_RHT_X_BOUNDARY, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_DEPTH_OFFSET, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_CLIP_WINDOW, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_VIEWPORT_OFFSET, NULL),
	VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_XY_SCALING, NULL),
	/* Note: The docs say this was also 105, but it was 106 in the
	 * initial userland code drop.
	 */
	VC4_DEFINE_PACKET(VC4_PACKET_CLIPPER_Z_SCALING, NULL),

	VC4_DEFINE_PACKET(VC4_PACKET_TILE_BINNING_MODE_CONFIG,
			  validate_tile_binning_config),

	VC4_DEFINE_PACKET(VC4_PACKET_GEM_HANDLES, validate_gem_handles),
};

int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec)
{
	uint32_t len = exec->args->bin_cl_size;
	uint32_t dst_offset = 0;
	uint32_t src_offset = 0;

	while (src_offset < len) {
		void *dst_pkt = validated + dst_offset;
		void *src_pkt = unvalidated + src_offset;
		u8 cmd = *(uint8_t *)src_pkt;
		const struct cmd_info *info;

		if (cmd >= ARRAY_SIZE(cmd_info)) {
			DRM_ERROR("0x%08x: packet %d out of bounds\n",
				  src_offset, cmd);
			return -EINVAL;
		}

		info = &cmd_info[cmd];
		if (!info->name) {
			DRM_ERROR("0x%08x: packet %d invalid\n",
				  src_offset, cmd);
			return -EINVAL;
		}

		if (src_offset + info->len > len) {
			DRM_ERROR("0x%08x: packet %d (%s) length 0x%08x "
				  "exceeds bounds (0x%08x)\n",
				  src_offset, cmd, info->name, info->len,
				  src_offset + len);
			return -EINVAL;
		}

		if (cmd != VC4_PACKET_GEM_HANDLES)
			memcpy(dst_pkt, src_pkt, info->len);

		if (info->func && info->func(exec,
					     dst_pkt + 1,
					     src_pkt + 1)) {
			DRM_ERROR("0x%08x: packet %d (%s) failed to validate\n",
				  src_offset, cmd, info->name);
			return -EINVAL;
		}

		src_offset += info->len;
		/* GEM handle loading doesn't produce HW packets. */
		if (cmd != VC4_PACKET_GEM_HANDLES)
			dst_offset += info->len;

		/* When the CL hits halt, it'll stop reading anything else. */
		if (cmd == VC4_PACKET_HALT)
			break;
	}

	exec->ct0ea = exec->ct0ca + dst_offset;

	if (!exec->found_start_tile_binning_packet) {
		DRM_ERROR("Bin CL missing VC4_PACKET_START_TILE_BINNING\n");
		return -EINVAL;
	}

	/* The bin CL must be ended with INCREMENT_SEMAPHORE and FLUSH.  The
	 * semaphore is used to trigger the render CL to start up, and the
	 * FLUSH is what caps the bin lists with
	 * VC4_PACKET_RETURN_FROM_SUB_LIST (so they jump back to the main
	 * render CL when they get called to) and actually triggers the queued
	 * semaphore increment.
	 */
	if (!exec->found_increment_semaphore_packet || !exec->found_flush) {
		DRM_ERROR("Bin CL missing VC4_PACKET_INCREMENT_SEMAPHORE + "
			  "VC4_PACKET_FLUSH\n");
		return -EINVAL;
	}

	return 0;
}

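/* Illustrative sketch, assuming the one-byte opcode values from
 * vc4_packet.h (VC4_PACKET_INCREMENT_SEMAPHORE = 7, VC4_PACKET_FLUSH =
 * 4): the last two bytes of a well-formed bin CL are { 0x07, 0x04 },
 * which is exactly the ordering the validate_bin_pos() checks above
 * enforce.
 */
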
static bool
reloc_tex(struct vc4_exec_info *exec,
	  void *uniform_data_u,
	  struct vc4_texture_sample_info *sample,
	  uint32_t texture_handle_index, bool is_cs)
{
	struct drm_gem_cma_object *tex;
	uint32_t p0 = *(uint32_t *)(uniform_data_u + sample->p_offset[0]);
	uint32_t p1 = *(uint32_t *)(uniform_data_u + sample->p_offset[1]);
	uint32_t p2 = (sample->p_offset[2] != ~0 ?
		       *(uint32_t *)(uniform_data_u + sample->p_offset[2]) : 0);
	uint32_t p3 = (sample->p_offset[3] != ~0 ?
		       *(uint32_t *)(uniform_data_u + sample->p_offset[3]) : 0);
	uint32_t *validated_p0 = exec->uniforms_v + sample->p_offset[0];
	uint32_t offset = p0 & VC4_TEX_P0_OFFSET_MASK;
	uint32_t miplevels = VC4_GET_FIELD(p0, VC4_TEX_P0_MIPLVLS);
	uint32_t width = VC4_GET_FIELD(p1, VC4_TEX_P1_WIDTH);
	uint32_t height = VC4_GET_FIELD(p1, VC4_TEX_P1_HEIGHT);
	uint32_t cpp, tiling_format, utile_w, utile_h;
	uint32_t i;
	uint32_t cube_map_stride = 0;
	enum vc4_texture_data_type type;

	tex = vc4_use_bo(exec, texture_handle_index);
	if (!tex)
		return false;

	if (sample->is_direct) {
		uint32_t remaining_size = tex->base.size - p0;

		if (p0 > tex->base.size - 4) {
			DRM_ERROR("UBO offset greater than UBO size\n");
			goto fail;
		}
		if (p1 > remaining_size - 4) {
			DRM_ERROR("UBO clamp would allow reads "
				  "outside of UBO\n");
			goto fail;
		}
		*validated_p0 = tex->paddr + p0;
		return true;
	}

	if (width == 0)
		width = 2048;
	if (height == 0)
		height = 2048;

	if (p0 & VC4_TEX_P0_CMMODE_MASK) {
		if (VC4_GET_FIELD(p2, VC4_TEX_P2_PTYPE) ==
		    VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE)
			cube_map_stride = p2 & VC4_TEX_P2_CMST_MASK;
		if (VC4_GET_FIELD(p3, VC4_TEX_P2_PTYPE) ==
		    VC4_TEX_P2_PTYPE_CUBE_MAP_STRIDE) {
			if (cube_map_stride) {
				DRM_ERROR("Cube map stride set twice\n");
				goto fail;
			}

			cube_map_stride = p3 & VC4_TEX_P2_CMST_MASK;
		}
		if (!cube_map_stride) {
			DRM_ERROR("Cube map stride not set\n");
			goto fail;
		}
	}

	type = (VC4_GET_FIELD(p0, VC4_TEX_P0_TYPE) |
		(VC4_GET_FIELD(p1, VC4_TEX_P1_TYPE4) << 4));

	switch (type) {
	case VC4_TEXTURE_TYPE_RGBA8888:
	case VC4_TEXTURE_TYPE_RGBX8888:
	case VC4_TEXTURE_TYPE_RGBA32R:
		cpp = 4;
		break;
	case VC4_TEXTURE_TYPE_RGBA4444:
	case VC4_TEXTURE_TYPE_RGBA5551:
	case VC4_TEXTURE_TYPE_RGB565:
	case VC4_TEXTURE_TYPE_LUMALPHA:
	case VC4_TEXTURE_TYPE_S16F:
	case VC4_TEXTURE_TYPE_S16:
		cpp = 2;
		break;
	case VC4_TEXTURE_TYPE_LUMINANCE:
	case VC4_TEXTURE_TYPE_ALPHA:
	case VC4_TEXTURE_TYPE_S8:
		cpp = 1;
		break;
	case VC4_TEXTURE_TYPE_ETC1:
		/* ETC1 is arranged as 64-bit blocks, where each block is 4x4
		 * pixels.
		 */
		cpp = 8;
		width = (width + 3) >> 2;
		height = (height + 3) >> 2;
		break;
	case VC4_TEXTURE_TYPE_BW1:
	case VC4_TEXTURE_TYPE_A4:
	case VC4_TEXTURE_TYPE_A1:
	case VC4_TEXTURE_TYPE_RGBA64:
	case VC4_TEXTURE_TYPE_YUV422R:
	default:
		DRM_ERROR("Texture format %d unsupported\n", type);
		goto fail;
	}
	utile_w = utile_width(cpp);
	utile_h = utile_height(cpp);

	if (type == VC4_TEXTURE_TYPE_RGBA32R) {
		tiling_format = VC4_TILING_FORMAT_LINEAR;
	} else {
		if (size_is_lt(width, height, cpp))
			tiling_format = VC4_TILING_FORMAT_LT;
		else
			tiling_format = VC4_TILING_FORMAT_T;
	}

	if (!vc4_check_tex_size(exec, tex, offset + cube_map_stride * 5,
				tiling_format, width, height, cpp)) {
		goto fail;
	}

	/* The mipmap levels are stored before the base of the texture.  Make
	 * sure there is actually space in the BO.
	 */
	for (i = 1; i <= miplevels; i++) {
		uint32_t level_width = max(width >> i, 1u);
		uint32_t level_height = max(height >> i, 1u);
		uint32_t aligned_width, aligned_height;
		uint32_t level_size;

		/* Once the levels get small enough, they drop from T to LT. */
		if (tiling_format == VC4_TILING_FORMAT_T &&
		    size_is_lt(level_width, level_height, cpp)) {
			tiling_format = VC4_TILING_FORMAT_LT;
		}

		switch (tiling_format) {
		case VC4_TILING_FORMAT_T:
			aligned_width = round_up(level_width, utile_w * 8);
			aligned_height = round_up(level_height, utile_h * 8);
			break;
		case VC4_TILING_FORMAT_LT:
			aligned_width = round_up(level_width, utile_w);
			aligned_height = round_up(level_height, utile_h);
			break;
		default:
			aligned_width = round_up(level_width, utile_w);
			aligned_height = level_height;
			break;
		}

		level_size = aligned_width * cpp * aligned_height;

		if (offset < level_size) {
			DRM_ERROR("Level %d (%dx%d -> %dx%d) size %db "
				  "overflowed buffer bounds (offset %d)\n",
				  i, level_width, level_height,
				  aligned_width, aligned_height,
				  level_size, offset);
			goto fail;
		}

		offset -= level_size;
	}

	*validated_p0 = tex->paddr + p0;

	if (is_cs) {
		exec->bin_dep_seqno = max(exec->bin_dep_seqno,
					  to_vc4_bo(&tex->base)->write_seqno);
	}

	return true;
 fail:
	DRM_INFO("Texture p0 at %d: 0x%08x\n", sample->p_offset[0], p0);
	DRM_INFO("Texture p1 at %d: 0x%08x\n", sample->p_offset[1], p1);
	DRM_INFO("Texture p2 at %d: 0x%08x\n", sample->p_offset[2], p2);
	DRM_INFO("Texture p3 at %d: 0x%08x\n", sample->p_offset[3], p3);
	return false;
}

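/* Worked example (illustrative): a 256x256 RGBA8888 texture with
 * miplevels = 2 keeps both stored levels (128x128 and 64x64) in T
 * format at cpp = 4, so level 1 takes 128 * 4 * 128 = 65536 bytes and
 * level 2 takes 64 * 4 * 64 = 16384 bytes; the base offset in p0 must
 * therefore be at least 81920 bytes into the BO.
 */
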
static int
validate_gl_shader_rec(struct drm_device *dev,
		       struct vc4_exec_info *exec,
		       struct vc4_shader_state *state)
{
	uint32_t *src_handles;
	void *pkt_u, *pkt_v;
	static const uint32_t shader_reloc_offsets[] = {
		4, /* fs */
		16, /* vs */
		28, /* cs */
	};
	uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
	struct drm_gem_cma_object *bo[shader_reloc_count + 8];
	uint32_t nr_attributes, nr_relocs, packet_size;
	int i;

	nr_attributes = state->addr & 0x7;
	if (nr_attributes == 0)
		nr_attributes = 8;
	packet_size = gl_shader_rec_size(state->addr);

	nr_relocs = ARRAY_SIZE(shader_reloc_offsets) + nr_attributes;
	if (nr_relocs * 4 > exec->shader_rec_size) {
		DRM_ERROR("overflowed shader recs reading %d handles "
			  "from %d bytes left\n",
			  nr_relocs, exec->shader_rec_size);
		return -EINVAL;
	}
	src_handles = exec->shader_rec_u;
	exec->shader_rec_u += nr_relocs * 4;
	exec->shader_rec_size -= nr_relocs * 4;

	if (packet_size > exec->shader_rec_size) {
		DRM_ERROR("overflowed shader recs copying %db packet "
			  "from %d bytes left\n",
			  packet_size, exec->shader_rec_size);
		return -EINVAL;
	}
	pkt_u = exec->shader_rec_u;
	pkt_v = exec->shader_rec_v;
	memcpy(pkt_v, pkt_u, packet_size);
	exec->shader_rec_u += packet_size;
	/* Shader recs have to be aligned to 16 bytes (due to the attribute
	 * flags being in the low bytes), so round the next validated shader
	 * rec address up.  This should be safe, since we've got so many
	 * relocations in a shader rec packet.
	 */
	BUG_ON(roundup(packet_size, 16) - packet_size > nr_relocs * 4);
	exec->shader_rec_v += roundup(packet_size, 16);
	exec->shader_rec_size -= packet_size;

	for (i = 0; i < shader_reloc_count; i++) {
		if (src_handles[i] >= exec->bo_count) {
			DRM_ERROR("Shader handle %d too big\n", src_handles[i]);
			return -EINVAL;
		}

		bo[i] = exec->bo[src_handles[i]];
		if (!bo[i])
			return -EINVAL;
	}
	for (i = shader_reloc_count; i < nr_relocs; i++) {
		bo[i] = vc4_use_bo(exec, src_handles[i]);
		if (!bo[i])
			return -EINVAL;
	}

	if (((*(uint16_t *)pkt_u & VC4_SHADER_FLAG_FS_SINGLE_THREAD) == 0) !=
	    to_vc4_bo(&bo[0]->base)->validated_shader->is_threaded) {
		DRM_ERROR("Thread mode of CL and FS do not match\n");
		return -EINVAL;
	}

	if (to_vc4_bo(&bo[1]->base)->validated_shader->is_threaded ||
	    to_vc4_bo(&bo[2]->base)->validated_shader->is_threaded) {
		DRM_ERROR("cs and vs cannot be threaded\n");
		return -EINVAL;
	}

	for (i = 0; i < shader_reloc_count; i++) {
		struct vc4_validated_shader_info *validated_shader;
		uint32_t o = shader_reloc_offsets[i];
		uint32_t src_offset = *(uint32_t *)(pkt_u + o);
		uint32_t *texture_handles_u;
		void *uniform_data_u;
		uint32_t tex, uni;

		*(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;

		if (src_offset != 0) {
			DRM_ERROR("Shaders must be at offset 0 of "
				  "the BO.\n");
			return -EINVAL;
		}

		validated_shader = to_vc4_bo(&bo[i]->base)->validated_shader;
		if (!validated_shader)
			return -EINVAL;

		if (validated_shader->uniforms_src_size >
		    exec->uniforms_size) {
			DRM_ERROR("Uniforms src buffer overflow\n");
			return -EINVAL;
		}

		texture_handles_u = exec->uniforms_u;
		uniform_data_u = (texture_handles_u +
				  validated_shader->num_texture_samples);

		memcpy(exec->uniforms_v, uniform_data_u,
		       validated_shader->uniforms_size);

		for (tex = 0;
		     tex < validated_shader->num_texture_samples;
		     tex++) {
			if (!reloc_tex(exec,
				       uniform_data_u,
				       &validated_shader->texture_samples[tex],
				       texture_handles_u[tex],
				       i == 2)) {
				return -EINVAL;
			}
		}

		/* Fill in the uniform slots that need this shader's
		 * start-of-uniforms address (used for resetting the uniform
		 * stream in the presence of control flow).
		 */
		for (uni = 0;
		     uni < validated_shader->num_uniform_addr_offsets;
		     uni++) {
			uint32_t o = validated_shader->uniform_addr_offsets[uni];
			((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
		}

		*(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;

		exec->uniforms_u += validated_shader->uniforms_src_size;
		exec->uniforms_v += validated_shader->uniforms_size;
		exec->uniforms_p += validated_shader->uniforms_size;
	}

	for (i = 0; i < nr_attributes; i++) {
		struct drm_gem_cma_object *vbo =
			bo[ARRAY_SIZE(shader_reloc_offsets) + i];
		uint32_t o = 36 + i * 8;
		uint32_t offset = *(uint32_t *)(pkt_u + o + 0);
		uint32_t attr_size = *(uint8_t *)(pkt_u + o + 4) + 1;
		uint32_t stride = *(uint8_t *)(pkt_u + o + 5);
		uint32_t max_index;

		exec->bin_dep_seqno = max(exec->bin_dep_seqno,
					  to_vc4_bo(&vbo->base)->write_seqno);

		if (state->addr & 0x8)
			stride |= (*(uint32_t *)(pkt_u + 100 + i * 4)) & ~0xff;

		if (vbo->base.size < offset ||
		    vbo->base.size - offset < attr_size) {
			DRM_ERROR("BO offset overflow (%d + %d > %zu)\n",
				  offset, attr_size, vbo->base.size);
			return -EINVAL;
		}

		if (stride != 0) {
			max_index = ((vbo->base.size - offset - attr_size) /
				     stride);
			if (state->max_index > max_index) {
				DRM_ERROR("primitives use index %d out of "
					  "supplied %d\n",
					  state->max_index, max_index);
				return -EINVAL;
			}
		}

		*(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
	}

	return 0;
}

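/* Illustrative sketch of the shader rec layout assumed above: each of
 * fs/vs/cs has its code address at shader_reloc_offsets[i] with its
 * uniforms address 4 bytes later, and attribute records start at byte
 * 36, 8 bytes apiece.  For attribute i = 1, o = 44, so the relocated
 * vertex BO address is written at pkt_v + 44, with the size byte read
 * from pkt_u + 48 and the stride byte from pkt_u + 49.
 */
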
int
vc4_validate_shader_recs(struct drm_device *dev,
			 struct vc4_exec_info *exec)
{
	uint32_t i;
	int ret = 0;

	for (i = 0; i < exec->shader_state_count; i++) {
		ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]);
		if (ret)
			return ret;
	}

	return ret;
}