headers: drm: Sync with drm-next
[drm/libdrm.git] / etnaviv / etnaviv_drm.h

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ETNAVIV_DRM_H__
#define __ETNAVIV_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

/* Please note that modifications to all structs defined here are
 * subject to backwards-compatibility constraints:
 *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
 *     user/kernel compatibility
 *  2) Keep fields aligned to their size
 *  3) Because of how drm_ioctl() works, we can add new fields at
 *     the end of an ioctl if some care is taken: drm_ioctl() will
 *     zero out the new fields at the tail of the ioctl, so a zero
 *     value should have a backwards compatible meaning.  And for
 *     output params, userspace won't see the newly added output
 *     fields.. so that has to be somehow ok.
 */

/* timeouts are specified in clock-monotonic absolute times (to simplify
 * restarting interrupted ioctls).  The following struct is logically the
 * same as 'struct timespec' but 32/64b ABI safe.
 */
struct drm_etnaviv_timespec {
        __s64 tv_sec;  /* seconds */
        __s64 tv_nsec; /* nanoseconds */
};
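
/* Usage sketch (illustrative only, not part of the UAPI): because timeouts
 * are absolute CLOCK_MONOTONIC values, a relative timeout is built by adding
 * it to the current monotonic time, e.g. "now plus one second":
 *
 *      struct timespec now;
 *      struct drm_etnaviv_timespec timeout;
 *
 *      clock_gettime(CLOCK_MONOTONIC, &now);
 *      timeout.tv_sec  = now.tv_sec + 1;
 *      timeout.tv_nsec = now.tv_nsec;
 */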

#define ETNAVIV_PARAM_GPU_MODEL 0x01
#define ETNAVIV_PARAM_GPU_REVISION 0x02
#define ETNAVIV_PARAM_GPU_FEATURES_0 0x03
#define ETNAVIV_PARAM_GPU_FEATURES_1 0x04
#define ETNAVIV_PARAM_GPU_FEATURES_2 0x05
#define ETNAVIV_PARAM_GPU_FEATURES_3 0x06
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f

#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
#define ETNAVIV_PARAM_GPU_THREAD_COUNT 0x12
#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE 0x13
#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT 0x14
#define ETNAVIV_PARAM_GPU_PIXEL_PIPES 0x15
#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
#define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17
#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18
#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19
#define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a
#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e

#define ETNA_MAX_PIPES 4

struct drm_etnaviv_param {
        __u32 pipe;  /* in */
        __u32 param; /* in, ETNAVIV_PARAM_x */
        __u64 value; /* out (get_param) or in (set_param) */
};
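
/* Usage sketch (illustrative only): query a GPU parameter via the GET_PARAM
 * ioctl, assuming 'fd' is an already-open render node (e.g.
 * /dev/dri/renderD128) and using drmIoctl() from libdrm, which restarts on
 * EINTR:
 *
 *      struct drm_etnaviv_param req = {
 *              .pipe  = 0,
 *              .param = ETNAVIV_PARAM_GPU_MODEL,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req) == 0)
 *              printf("GPU model: 0x%llx\n", (unsigned long long)req.value);
 */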

/*
 * GEM buffers:
 */

#define ETNA_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define ETNA_BO_CACHED 0x00010000
#define ETNA_BO_WC 0x00020000
#define ETNA_BO_UNCACHED 0x00040000
/* map flags */
#define ETNA_BO_FORCE_MMU 0x00100000

struct drm_etnaviv_gem_new {
        __u64 size;   /* in */
        __u32 flags;  /* in, mask of ETNA_BO_x */
        __u32 handle; /* out */
};

struct drm_etnaviv_gem_info {
        __u32 handle; /* in */
        __u32 pad;
        __u64 offset; /* out, offset to pass to mmap() */
};
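
/* Usage sketch (illustrative only): allocate a write-combined BO and map it
 * into the process through the fake mmap offset returned by GEM_INFO;
 * 'fd' is an open DRM fd:
 *
 *      struct drm_etnaviv_gem_new req = {
 *              .size  = 4096,
 *              .flags = ETNA_BO_WC,
 *      };
 *      struct drm_etnaviv_gem_info info = { 0 };
 *      void *map;
 *
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &req);
 *      info.handle = req.handle;
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info);
 *      map = mmap(NULL, req.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, info.offset);
 */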

#define ETNA_PREP_READ 0x01
#define ETNA_PREP_WRITE 0x02
#define ETNA_PREP_NOSYNC 0x04

struct drm_etnaviv_gem_cpu_prep {
        __u32 handle; /* in */
        __u32 op;     /* in, mask of ETNA_PREP_x */
        struct drm_etnaviv_timespec timeout; /* in */
};

struct drm_etnaviv_gem_cpu_fini {
        __u32 handle; /* in */
        __u32 flags;  /* in, placeholder for now, no defined values */
};
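
/* Usage sketch (illustrative only): bracket CPU writes to a BO with
 * CPU_PREP/CPU_FINI so pending GPU work is synchronized against; 'bo_handle',
 * 'timeout' (absolute CLOCK_MONOTONIC, see drm_etnaviv_timespec) and 'fd'
 * are assumed to be set up elsewhere:
 *
 *      struct drm_etnaviv_gem_cpu_prep prep = {
 *              .handle  = bo_handle,
 *              .op      = ETNA_PREP_WRITE,
 *              .timeout = timeout,
 *      };
 *      struct drm_etnaviv_gem_cpu_fini fini = { .handle = bo_handle };
 *
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
 *      ... CPU access through the mmap()ed pointer ...
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
 */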

/*
 * Cmdstream Submission:
 */

/* The value written into the cmdstream is logically:
 * relocbuf->gpuaddr + reloc_offset
 *
 * NOTE that reloc's must be sorted by order of increasing submit_offset,
 * otherwise EINVAL.
 */
struct drm_etnaviv_gem_submit_reloc {
        __u32 submit_offset; /* in, offset from submit_bo */
        __u32 reloc_idx;     /* in, index of reloc_bo buffer */
        __u64 reloc_offset;  /* in, offset from start of reloc_bo */
        __u32 flags;         /* in, placeholder for now, no defined values */
};

/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
 * cmdstream buffer(s) themselves or reloc entries) has one (and only
 * one) entry in the submit->bos[] table.
 *
 * As an optimization, the current buffer (gpu virtual address) can be
 * passed back through the 'presumed' field.  If on a subsequent reloc,
 * userspace passes back a 'presumed' address that is still valid,
 * then patching the cmdstream for this entry is skipped.  This can
 * avoid the kernel needing to map/access the cmdstream bo in the common
 * case.
 * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed'
 * field is interpreted as the fixed location to map the bo into the gpu
 * virtual address space.  If the kernel is unable to map the buffer at
 * this location the submit will fail.  This means userspace is responsible
 * for the whole gpu virtual address management.
 */
#define ETNA_SUBMIT_BO_READ 0x0001
#define ETNA_SUBMIT_BO_WRITE 0x0002
struct drm_etnaviv_gem_submit_bo {
        __u32 flags;    /* in, mask of ETNA_SUBMIT_BO_x */
        __u32 handle;   /* in, GEM handle */
        __u64 presumed; /* in/out, presumed buffer address */
};
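
/* Usage sketch (illustrative only): in a softpin submit (ETNA_SUBMIT_SOFTPIN)
 * userspace manages the GPU virtual address space itself and passes the
 * chosen address (at or above ETNAVIV_PARAM_SOFTPIN_START_ADDR) in
 * 'presumed'; 'softpin_va' and 'bo_handle' are hypothetical values produced
 * by a userspace VA allocator:
 *
 *      struct drm_etnaviv_gem_submit_bo bo = {
 *              .flags    = ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE,
 *              .handle   = bo_handle,
 *              .presumed = softpin_va,
 *      };
 */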

/* performance monitor request (pmr) */
#define ETNA_PM_PROCESS_PRE 0x0001
#define ETNA_PM_PROCESS_POST 0x0002
struct drm_etnaviv_gem_submit_pmr {
        __u32 flags;       /* in, when to process request (ETNA_PM_PROCESS_x) */
        __u8  domain;      /* in, pm domain */
        __u8  pad;
        __u16 signal;      /* in, pm signal */
        __u32 sequence;    /* in, sequence number */
        __u32 read_offset; /* in, offset from read_bo */
        __u32 read_idx;    /* in, index of read_bo buffer */
};

/* Each cmdstream submit consists of a table of buffers involved, and
 * one or more cmdstream buffers.  This allows for conditional execution
 * (context-restore), and IB buffers needed for per tile/bin draw cmds.
 */
#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
#define ETNA_SUBMIT_SOFTPIN 0x0008
#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
                           ETNA_SUBMIT_FENCE_FD_IN | \
                           ETNA_SUBMIT_FENCE_FD_OUT | \
                           ETNA_SUBMIT_SOFTPIN)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
struct drm_etnaviv_gem_submit {
        __u32 fence;       /* out */
        __u32 pipe;        /* in */
        __u32 exec_state;  /* in, initial execution state (ETNA_PIPE_x) */
        __u32 nr_bos;      /* in, number of submit_bo's */
        __u32 nr_relocs;   /* in, number of submit_reloc's */
        __u32 stream_size; /* in, cmdstream size */
        __u64 bos;         /* in, ptr to array of submit_bo's */
        __u64 relocs;      /* in, ptr to array of submit_reloc's */
        __u64 stream;      /* in, ptr to cmdstream */
        __u32 flags;       /* in, mask of ETNA_SUBMIT_x */
        __s32 fence_fd;    /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
        __u64 pmrs;        /* in, ptr to array of submit_pmr's */
        __u32 nr_pmrs;     /* in, number of submit_pmr's */
        __u32 pad;
};
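
/* Usage sketch (illustrative only): submit a command stream referencing one
 * BO.  The command words are GPU-specific and normally produced by a
 * higher-level driver, so 'cmds', 'nwords', 'bo_handle' and 'fd' are assumed
 * here.  Note that user pointers are passed as __u64:
 *
 *      struct drm_etnaviv_gem_submit_bo bos[1] = {
 *              { .flags = ETNA_SUBMIT_BO_WRITE, .handle = bo_handle },
 *      };
 *      struct drm_etnaviv_gem_submit submit = {
 *              .pipe        = ETNA_PIPE_3D,
 *              .exec_state  = ETNA_PIPE_3D,
 *              .nr_bos      = 1,
 *              .bos         = (__u64)(uintptr_t)bos,
 *              .stream_size = nwords * 4,
 *              .stream      = (__u64)(uintptr_t)cmds,
 *              .flags       = ETNA_SUBMIT_FENCE_FD_OUT,
 *              .fence_fd    = -1,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &submit) == 0)
 *              ... submit.fence and submit.fence_fd identify the new fence ...
 */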

/* The normal way to synchronize with the GPU is just to CPU_PREP on
 * a buffer if you need to access it from the CPU (other cmdstream
 * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
 * handle the required synchronization under the hood).  This ioctl
 * mainly just exists as a way to implement the gallium pipe_fence
 * APIs without requiring a dummy bo to synchronize on.
 */
#define ETNA_WAIT_NONBLOCK 0x01
struct drm_etnaviv_wait_fence {
        __u32 pipe;  /* in */
        __u32 fence; /* in */
        __u32 flags; /* in, mask of ETNA_WAIT_x */
        __u32 pad;
        struct drm_etnaviv_timespec timeout; /* in */
};
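
/* Usage sketch (illustrative only): block until the fence returned by a
 * previous GEM_SUBMIT has signalled, or until the absolute timeout expires;
 * 'submit', 'timeout' and 'fd' are from the sketches above:
 *
 *      struct drm_etnaviv_wait_fence wait = {
 *              .pipe    = ETNA_PIPE_3D,
 *              .fence   = submit.fence,
 *              .timeout = timeout,
 *      };
 *
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_WAIT_FENCE, &wait);
 */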

#define ETNA_USERPTR_READ 0x01
#define ETNA_USERPTR_WRITE 0x02
struct drm_etnaviv_gem_userptr {
        __u64 user_ptr;  /* in, page aligned user pointer */
        __u64 user_size; /* in, page aligned user size */
        __u32 flags;     /* in, flags */
        __u32 handle;    /* out, non-zero handle */
};
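
/* Usage sketch (illustrative only): wrap an existing page-aligned user
 * allocation into a GEM object the GPU may read from, assuming a 4 KiB page
 * size and an open DRM fd 'fd':
 *
 *      void *buf = NULL;
 *      posix_memalign(&buf, 4096, 65536);
 *
 *      struct drm_etnaviv_gem_userptr req = {
 *              .user_ptr  = (__u64)(uintptr_t)buf,
 *              .user_size = 65536,
 *              .flags     = ETNA_USERPTR_READ,
 *      };
 *
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_USERPTR, &req);
 */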

struct drm_etnaviv_gem_wait {
        __u32 pipe;   /* in */
        __u32 handle; /* in, bo to be waited for */
        __u32 flags;  /* in, mask of ETNA_WAIT_x */
        __u32 pad;
        struct drm_etnaviv_timespec timeout; /* in */
};

/*
 * Performance Monitor (PM):
 */

struct drm_etnaviv_pm_domain {
        __u32 pipe;       /* in */
        __u8  iter;       /* in/out, select pm domain at index iter */
        __u8  id;         /* out, id of domain */
        __u16 nr_signals; /* out, how many signals does this domain provide */
        char  name[64];   /* out, name of domain */
};

struct drm_etnaviv_pm_signal {
        __u32 pipe;       /* in */
        __u8  domain;     /* in, pm domain index */
        __u8  pad;
        __u16 iter;       /* in/out, select pm source at index iter */
        __u16 id;         /* out, id of signal */
        char  name[64];   /* out, name of signal */
};
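
/* Usage sketch (illustrative only): query the first perfmon domain (index 0)
 * and the first signal of domain index 0; the kernel fills the out fields
 * (id, nr_signals, name) and the in/out 'iter' field selects which entry is
 * returned, so repeated calls can enumerate all domains/signals:
 *
 *      struct drm_etnaviv_pm_domain dom = { .pipe = 0, .iter = 0 };
 *      struct drm_etnaviv_pm_signal sig = { .pipe = 0, .domain = 0, .iter = 0 };
 *
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_PM_QUERY_DOM, &dom);
 *      drmIoctl(fd, DRM_IOCTL_ETNAVIV_PM_QUERY_SIG, &sig);
 *      printf("domain %s, signal %s\n", dom.name, sig.name);
 */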

#define DRM_ETNAVIV_GET_PARAM 0x00
/* placeholder:
#define DRM_ETNAVIV_SET_PARAM 0x01
 */
#define DRM_ETNAVIV_GEM_NEW 0x02
#define DRM_ETNAVIV_GEM_INFO 0x03
#define DRM_ETNAVIV_GEM_CPU_PREP 0x04
#define DRM_ETNAVIV_GEM_CPU_FINI 0x05
#define DRM_ETNAVIV_GEM_SUBMIT 0x06
#define DRM_ETNAVIV_WAIT_FENCE 0x07
#define DRM_ETNAVIV_GEM_USERPTR 0x08
#define DRM_ETNAVIV_GEM_WAIT 0x09
#define DRM_ETNAVIV_PM_QUERY_DOM 0x0a
#define DRM_ETNAVIV_PM_QUERY_SIG 0x0b
#define DRM_ETNAVIV_NUM_IOCTLS 0x0c

#define DRM_IOCTL_ETNAVIV_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
#define DRM_IOCTL_ETNAVIV_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
#define DRM_IOCTL_ETNAVIV_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
#define DRM_IOCTL_ETNAVIV_WAIT_FENCE DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
#define DRM_IOCTL_ETNAVIV_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
#define DRM_IOCTL_ETNAVIV_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
#define DRM_IOCTL_ETNAVIV_PM_QUERY_DOM DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_DOM, struct drm_etnaviv_pm_domain)
#define DRM_IOCTL_ETNAVIV_PM_QUERY_SIG DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_PM_QUERY_SIG, struct drm_etnaviv_pm_signal)

#if defined(__cplusplus)
}
#endif

#endif /* __ETNAVIV_DRM_H__ */