/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef _UAPI_VC4_DRM_H_
#define _UAPI_VC4_DRM_H_

#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
#define DRM_VC4_SUBMIT_CL                         0x00
#define DRM_VC4_WAIT_SEQNO                        0x01
#define DRM_VC4_WAIT_BO                           0x02
#define DRM_VC4_CREATE_BO                         0x03
#define DRM_VC4_MMAP_BO                           0x04
#define DRM_VC4_CREATE_SHADER_BO                  0x05
#define DRM_VC4_GET_HANG_STATE                    0x06
#define DRM_VC4_GET_PARAM                         0x07
#define DRM_VC4_SET_TILING                        0x08
#define DRM_VC4_GET_TILING                        0x09
#define DRM_IOCTL_VC4_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
#define DRM_IOCTL_VC4_WAIT_SEQNO          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
#define DRM_IOCTL_VC4_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
#define DRM_IOCTL_VC4_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
#define DRM_IOCTL_VC4_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
#define DRM_IOCTL_VC4_CREATE_SHADER_BO    DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)
#define DRM_IOCTL_VC4_GET_HANG_STATE      DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_HANG_STATE, struct drm_vc4_get_hang_state)
#define DRM_IOCTL_VC4_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_PARAM, struct drm_vc4_get_param)
#define DRM_IOCTL_VC4_SET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_SET_TILING, struct drm_vc4_set_tiling)
#define DRM_IOCTL_VC4_GET_TILING          DRM_IOWR(DRM_COMMAND_BASE + DRM_VC4_GET_TILING, struct drm_vc4_get_tiling)
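
/*
 * Illustrative sketch (not part of this UAPI): each request code above is
 * used by passing its argument struct to ioctl() on an open vc4 DRM node,
 * for example through libdrm's drmIoctl() wrapper, which retries on EINTR.
 * The device path and the "fd" variable are assumptions used by the example
 * comments throughout this file; error checking is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <xf86drm.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
 *	// drmIoctl(fd, DRM_IOCTL_VC4_..., &args) returns 0 on success and
 *	// -1 with errno set on failure.
 */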
struct drm_vc4_submit_rcl_surface {
	__u32 hindex; /* Handle index, or ~0 if not present. */
	__u32 offset; /* Offset to start of buffer. */
	/*
	 * Bits for either render config (color_write) or load/store packet.
	 * Bits should all be 0 for MSAA load/stores.
	 */
	__u16 bits;

#define VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES		(1 << 0)
	__u16 flags;
};
/**
 * struct drm_vc4_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * Drivers typically use GPU BOs to store batchbuffers / command lists and
 * their associated state.  However, because the VC4 lacks an MMU, we have to
 * do validation of memory accesses by the GPU commands.  If we were to store
 * our commands in BOs, we'd need to do uncached readback from them to do the
 * validation process, which is too expensive.  Instead, userspace accumulates
 * commands and associated state in plain memory, then the kernel copies the
 * data to its own address space, and then validates and stores it in a GPU
 * BO.
 */
struct drm_vc4_submit_cl {
	/* Pointer to the binner command list.
	 *
	 * This is the first set of commands executed, which runs the
	 * coordinate shader to determine where primitives land on the screen,
	 * then writes out the state updates and draw calls necessary per tile
	 * to the tile allocation BO.
	 */
	__u64 bin_cl;
	/* Pointer to the shader records.
	 *
	 * Shader records are the structures read by the hardware that contain
	 * pointers to uniforms, shaders, and vertex attributes.  The
	 * reference to the shader record has enough information to determine
	 * how many pointers are necessary (fixed number for shaders/uniforms,
	 * and an attribute count), so those BO indices into bo_handles are
	 * just stored as __u32s before each shader record passed in.
	 */
	__u64 shader_rec;
	/* Pointer to uniform data and texture handles for the textures
	 * referenced by the shader.
	 *
	 * For each shader state record, there is a set of uniform data in the
	 * order referenced by the record (FS, VS, then CS).  Each set of
	 * uniform data has a __u32 index into bo_handles per texture
	 * sample operation, in the order the QPU_W_TMUn_S writes appear in
	 * the program.  Following the texture BO handle indices is the actual
	 * uniform data.
	 *
	 * The individual uniform state blocks don't have sizes passed in,
	 * because the kernel has to determine the sizes anyway during shader
	 * code validation.
	 */
	__u64 uniforms;
	__u64 bo_handles;
	/* Size in bytes of the binner command list. */
	__u32 bin_cl_size;

	/* Size in bytes of the set of shader records. */
	__u32 shader_rec_size;

	/* Number of shader records.
	 *
	 * This could just be computed from the contents of shader_records and
	 * the address bits of references to them from the bin CL, but it
	 * keeps the kernel from having to resize some allocations it makes.
	 */
	__u32 shader_rec_count;

	/* Size in bytes of the uniform state. */
	__u32 uniforms_size;

	/* Number of BO handles passed in (size is that times 4). */
	__u32 bo_handle_count;
	struct drm_vc4_submit_rcl_surface color_read;
	struct drm_vc4_submit_rcl_surface color_write;
	struct drm_vc4_submit_rcl_surface zs_read;
	struct drm_vc4_submit_rcl_surface zs_write;
	struct drm_vc4_submit_rcl_surface msaa_color_write;
	struct drm_vc4_submit_rcl_surface msaa_zs_write;
	__u32 clear_color[2];
#define VC4_SUBMIT_CL_USE_CLEAR_COLOR			(1 << 0)
	__u32 flags;

	/* Returned value of the seqno of this render job (for the
	 * wait ioctl).
	 */
	__u64 seqno;
};
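
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd as
 * above: only the plumbing is shown, and all buffer/variable names are
 * placeholders.  bin_cl, shader_rec and uniforms must point at data that
 * passes the kernel's command-list validation, which is not shown here.
 *
 *	struct drm_vc4_submit_cl submit = {
 *		.bin_cl = (uintptr_t)bin_cl_buf,
 *		.bin_cl_size = bin_cl_len,
 *		.shader_rec = (uintptr_t)shader_rec_buf,
 *		.shader_rec_size = shader_rec_len,
 *		.shader_rec_count = num_shader_recs,
 *		.uniforms = (uintptr_t)uniforms_buf,
 *		.uniforms_size = uniforms_len,
 *		.bo_handles = (uintptr_t)bo_handles_array,
 *		.bo_handle_count = num_bo_handles,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *	// On success, submit.seqno identifies this job for the wait ioctls.
 */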
/**
 * struct drm_vc4_wait_seqno - ioctl argument for waiting for
 * DRM_VC4_SUBMIT_CL completion using its returned seqno.
 *
 * timeout_ns is the timeout in nanoseconds, where "0" means "don't
 * block, just return the status."
 */
struct drm_vc4_wait_seqno {
	__u64 seqno;
	__u64 timeout_ns;
};
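
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd and a
 * seqno previously returned by DRM_IOCTL_VC4_SUBMIT_CL:
 *
 *	struct drm_vc4_wait_seqno wait = {
 *		.seqno = submit.seqno,
 *		.timeout_ns = 1000000000ull,	// wait up to one second
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_SEQNO, &wait);
 *	// ret == 0 means the job completed; a non-zero return with errno set
 *	// indicates failure or timeout.
 */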
/**
 * struct drm_vc4_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_VC4_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_vc4_wait_bo {
	__u32 handle;
	__u32 pad;
	__u64 timeout_ns;
};
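
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd and a
 * GEM handle "bo_handle":
 *
 *	struct drm_vc4_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = 0,	// just query whether rendering is done
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_WAIT_BO, &wait);
 */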
/**
 * struct drm_vc4_create_bo - ioctl argument for creating VC4 BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_create_bo {
	__u32 size;
	__u32 flags;
	/** Returned GEM handle for the BO. */
	__u32 handle;
	__u32 pad;
};
/**
 * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
 *
 * This doesn't actually perform an mmap.  Instead, it returns the
 * offset you need to use in an mmap on the DRM device node.  This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_vc4_mmap_bo {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 flags;
	/** offset into the drm node to use for subsequent mmap call. */
	__u64 offset;
};
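
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd:
 * create a BO, then map it through the offset returned by MMAP_BO.  Error
 * checking is omitted.
 *
 *	#include <sys/mman.h>
 *
 *	struct drm_vc4_create_bo create = { .size = 65536 };
 *	drmIoctl(fd, DRM_IOCTL_VC4_CREATE_BO, &create);
 *
 *	struct drm_vc4_mmap_bo map = { .handle = create.handle };
 *	drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map);
 *
 *	void *ptr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */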
/**
 * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
 * shader BOs.
 *
 * Since allowing a shader to be overwritten while it's also being
 * executed from would allow privilege escalation, shaders must be
 * created using this ioctl, and they can't be mmapped later.
 */
struct drm_vc4_create_shader_bo {
	/* Size of the data argument. */
	__u32 size;
	/* Flags, currently must be 0. */
	__u32 flags;

	/* Pointer to the data. */
	__u64 data;

	/** Returned GEM handle for the BO. */
	__u32 handle;
	/* Pad, must be 0. */
	__u32 pad;
};
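
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd and a
 * buffer "qpu_code" of QPU instructions ("qpu_code_size" bytes); both names
 * are placeholders:
 *
 *	struct drm_vc4_create_shader_bo shader = {
 *		.size = qpu_code_size,
 *		.data = (uintptr_t)qpu_code,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &shader);
 *	// On success, shader.handle can be referenced from shader records;
 *	// the BO's contents can't be mmapped or rewritten afterwards.
 */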
struct drm_vc4_get_hang_state_bo {
	__u32 handle;
	__u32 paddr;
	__u32 size;
	__u32 pad;
};
/**
 * struct drm_vc4_get_hang_state - ioctl argument for collecting state
 * from a GPU hang for analysis.
 */
struct drm_vc4_get_hang_state {
	/** Pointer to array of struct drm_vc4_get_hang_state_bo. */
	__u64 bo;
	/**
	 * On input, the size of the bo array.  Output is the number
	 * of bos to be returned.
	 */
	__u32 bo_count;

	__u32 start_bin, start_render;

	__u32 ct0ra0, ct1ra0;

	/* Pad that we may save more registers into in the future. */
	__u32 pad[16];
};
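
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd with
 * sufficient privileges: call once with bo_count = 0 to learn how many BOs
 * the kernel captured at hang time, then again with an array of that size.
 * Error checking is omitted.
 *
 *	struct drm_vc4_get_hang_state hang = { 0 };
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &hang);
 *
 *	struct drm_vc4_get_hang_state_bo *bos =
 *		calloc(hang.bo_count, sizeof(*bos));
 *	hang.bo = (uintptr_t)bos;
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_HANG_STATE, &hang);
 */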
#define DRM_VC4_PARAM_V3D_IDENT0		0
#define DRM_VC4_PARAM_V3D_IDENT1		1
#define DRM_VC4_PARAM_V3D_IDENT2		2
#define DRM_VC4_PARAM_SUPPORTS_BRANCHES		3
#define DRM_VC4_PARAM_SUPPORTS_ETC1		4
#define DRM_VC4_PARAM_SUPPORTS_THREADED_FS	5
struct drm_vc4_get_param {
	__u32 param;
	__u32 pad;
	__u64 value;
};
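
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd:
 * query whether the kernel supports threaded fragment shaders.
 *
 *	struct drm_vc4_get_param gp = {
 *		.param = DRM_VC4_PARAM_SUPPORTS_THREADED_FS,
 *	};
 *	int ret = drmIoctl(fd, DRM_IOCTL_VC4_GET_PARAM, &gp);
 *	int has_threaded_fs = (ret == 0 && gp.value != 0);
 */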
struct drm_vc4_get_tiling {
	__u32 handle;
	__u32 flags;
	/** Returned DRM format modifier describing the BO's tiling. */
	__u64 modifier;
};
struct drm_vc4_set_tiling {
	__u32 handle;
	__u32 flags;
	/** DRM format modifier to apply to the BO. */
	__u64 modifier;
};
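
/*
 * Illustrative sketch (not part of this UAPI), assuming an open vc4 fd and a
 * GEM handle "bo_handle"; the T-tiled modifier named here is the one defined
 * in drm_fourcc.h.
 *
 *	#include <drm_fourcc.h>
 *
 *	struct drm_vc4_set_tiling set = {
 *		.handle = bo_handle,
 *		.modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VC4_SET_TILING, &set);
 *
 *	struct drm_vc4_get_tiling get = { .handle = bo_handle };
 *	drmIoctl(fd, DRM_IOCTL_VC4_GET_TILING, &get);
 *	// get.modifier now reports the BO's tiling modifier.
 */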
#if defined(__cplusplus)
}
#endif

#endif /* _UAPI_VC4_DRM_H_ */