/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
/*
 * Command Buffer helper:
 */
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}
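/*
 * Illustrative encoding (an editor's sketch, not driver output): assuming
 * the usual rnndb values (OP_LOAD_STATE == 0x08000000, COUNT at bit 16,
 * OFFSET__SHR == 2), loading one register at state address 0x380c would
 * emit the dword pair:
 *
 *	0x08010e03	OP_LOAD_STATE | COUNT(1) | OFFSET(0x380c >> 2)
 *	0x00000000	the value written to the register
 *
 * See cmdstream.xml.h for the authoritative bit layout.
 */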
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
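/*
 * CMD_SEM and CMD_STALL are used as a pair throughout this file: the
 * semaphore token arms a FE->PE handshake, and the STALL then parks the
 * front end until the pixel engine signals that token, ordering later
 * FE commands behind work the PE still has in flight.
 */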
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
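/*
 * Example sequence: switching from 3D to 2D emits FLUSH_CACHE(DEPTH |
 * COLOR), a FE->PE semaphore plus stall so the flush has finished, and
 * finally the PIPE_SELECT.  Flushing before the select keeps the
 * outgoing pipe's caches from holding dirty data while the other pipe
 * runs.
 */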
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_iommu_get_cmdbuf_va(gpu, buf) + off,
			size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}
/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	lw[1] = arg;
	/* Make sure the GPU sees the argument before the command */
	wmb();
	lw[0] = cmd;
}
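/*
 * Layout sketch: the 'wl_offset' callers pass in points at a 16-byte pair
 * of 64-bit commands at the end of the ring:
 *
 *	lw[0], lw[1]:	WAIT		<- patched by this function
 *	lw[2], lw[3]:	LINK -> lw[0]
 *
 * Writing lw[1] (the argument) before lw[0] (the opcode) means the FE
 * sees either the old WAIT or the complete new command, never a new
 * opcode paired with a stale argument.
 */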
/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return etnaviv_iommu_get_cmdbuf_va(gpu, buffer) + buffer->user_size;
}
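/*
 * Worked example: with a 4096-byte ring and user_size == 0x0ff8,
 * reserving 7 command slots needs 7 * sizeof(u64) == 56 bytes;
 * 0x0ff8 + 56 > 0x1000, so user_size wraps to 0 and the returned GPU
 * address is the start of the ring.  'cmd_dwords' counts 64-bit slots,
 * matching the 8-byte command alignment used throughout this file.
 */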
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
		 buffer->user_size - 4);

	return buffer->user_size / 8;
}
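/*
 * Resulting ring after init, sketched (offsets relative to the buffer):
 *
 *	0x00: WAIT 200
 *	0x08: LINK prefetch=2 -> 0x00
 *
 * i.e. a self-referencing waitlink loop: the FE idles on the WAIT until
 * etnaviv_buffer_queue() patches it into a LINK to real work.  The
 * return value (2) is the prefetch size, in 64-bit words, used to start
 * the FE.
 */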
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;

	buffer->user_size = 0;

	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
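/*
 * Stream built above for a combined 2D/3D core, sketched:
 *
 *	SELECT_PIPE(3D); MMUv2 CONFIG; SAFE_ADDRESS; SEM; STALL
 *	SELECT_PIPE(2D); MMUv2 CONFIG; SAFE_ADDRESS; SEM; STALL
 *	END
 *
 * Both pipes are pointed at the same master TLB and safe address; each
 * SEM/STALL pair holds the FE until the PE has observed the
 * configuration writes, and the END halts the FE once setup is done.
 */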
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
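/*
 * In the flush case the parking WAIT becomes a LINK with prefetch
 * 'dwords' into the freshly reserved block, so the stream ends as:
 *
 *	LINK(7) -> SEM; STALL; FLUSH_CACHE; [TS FLUSH;] SEM; STALL; END
 *
 * letting the GPU drain its caches before halting at the END.  Without
 * an active pipe there is nothing to flush, and the WAIT is simply
 * rewritten into an END in place.
 */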
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf);
	link_dwords = cmdbuf->size / 8;

	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append an MMU flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
	 */
	if (gpu->mmu->need_flush || gpu->switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (gpu->mmu->need_flush) {
			if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (gpu->switch_context)
			extra_dwords += 4;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

		if (gpu->mmu->need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					       VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
					       VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->mmu->need_flush = false;
		}

		if (gpu->switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
			gpu->exec_state = cmdbuf->exec_state;
			gpu->switch_context = false;
		}

		/* And the link to the submitted buffer */
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer.  return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_DEPTH |
			       VIVS_GL_FLUSH_CACHE_COLOR);
		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
			       VIVS_TS_FLUSH_CACHE_FLUSH);
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
			    buffer->user_size - 4);

	if (drm_debug & DRM_UT_DRIVER)
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target, etnaviv_iommu_get_cmdbuf_va(gpu, cmdbuf),
			cmdbuf->vaddr);

	if (drm_debug & DRM_UT_DRIVER) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug & DRM_UT_DRIVER)
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}
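/*
 * Big picture of a queue operation: the previous parking WAIT is
 * rewritten into a LINK, chaining ring -> (optional MMU flush / pipe
 * switch block) -> user command buffer -> back into the ring, where the
 * cache flush, EVENT, and a fresh WAIT/LINK loop were appended above.
 * The EVENT is what signals completion of 'event' back to the kernel;
 * the new WAIT/LINK parks the FE until the next submission patches it
 * again.
 */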