// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */

#include <drm/drm_drv.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
/*
 * Command Buffer helper:
 */

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	BUG_ON(buffer->user_size >= buffer->size);

	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}
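/*
 * Each single-register LOAD_STATE therefore occupies one 64-bit slot in the
 * ring: the header word (opcode, count and register offset) followed by the
 * register value.
 */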
static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
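/*
 * CMD_SEM and CMD_STALL are used as a pair below: the semaphore token is
 * queued from the front end (FE) to the pixel engine (PE), and the following
 * stall keeps the FE from fetching further commands until the PE has
 * signalled the semaphore, i.e. until previously queued work has drained.
 */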
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	lockdep_assert_held(&gpu->lock);

	/*
	 * This assumes that if we're switching to 2D, we're switching
	 * away from 3D, and vice versa.  Hence, if we're switching to
	 * the 2D core, we need to flush the 3D depth and color caches,
	 * otherwise we need to flush the 2D pixel engine cache.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf,
			&gpu->mmu_context->cmdbuf_mapping) +
			off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}
/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	/* write the new argument first, then the command, so the GPU never
	 * sees the new command paired with the old argument */
	lw[1] = arg;
	mb();
	lw[0] = cmd;
	mb();
}
/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	return etnaviv_cmdbuf_get_va(buffer,
				     &gpu->mmu_context->cmdbuf_mapping) +
	       buffer->user_size;
}
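/*
 * The kernel ring buffer always ends with an 8-byte WAIT followed by an
 * 8-byte LINK back to that WAIT, so the FE spins there until the WAIT is
 * replaced (via etnaviv_buffer_replace_wait()) with a LINK to newly queued
 * commands.  This is why the functions below compute waitlink_offset as
 * buffer->user_size - 16.
 */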
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	return buffer->user_size / 8;
}
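/*
 * The value returned by the buffer setup functions above and below is the
 * command length in 64-bit words (buffer->user_size / 8), which the caller
 * is expected to hand to the FE as the prefetch count when kicking off the
 * buffer.
 */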
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	if (gpu->identity.features & chipFeatures_PIPE_3D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	if (gpu->identity.features & chipFeatures_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
			       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
			       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
		CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	}

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	lockdep_assert_held(&gpu->lock);

	buffer->user_size = 0;

	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	lockdep_assert_held(&gpu->lock);

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH |
			VIVS_GL_FLUSH_CACHE_COLOR |
			VIVS_GL_FLUSH_CACHE_TEXTURE |
			VIVS_GL_FLUSH_CACHE_TEXTUREVS |
			VIVS_GL_FLUSH_CACHE_SHADER_L2;

	if (flush) {
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* Replace the last link-wait with an "END" command */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	lockdep_assert_held(&gpu->lock);

	/*
	 * We need at most 4 dwords in the return target:
	 * 1 event + 1 end + 1 wait + 1 link.
	 */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* Signal sync point event */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* Stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* Append waitlink */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/*
	 * Kick off the 'sync point' command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}
/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu_context, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;
	bool switch_mmu_context = gpu->mmu_context != mmu_context;
	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
	bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;

	lockdep_assert_held(&gpu->lock);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

	link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					    &gpu->mmu_context->cmdbuf_mapping);
	link_dwords = cmdbuf->size / 8;
	/*
	 * If we need maintenance prior to submitting this buffer, we will
	 * need to append a mmu flush load state, followed by a new
	 * link to this buffer - a total of four additional words.
	 */
	if (need_flush || switch_context) {
		u32 target, extra_dwords;

		/* link command */
		extra_dwords = 1;

		/* flush command */
		if (need_flush) {
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
				extra_dwords += 1;
			else
				extra_dwords += 3;
		}

		/* pipe switch commands */
		if (switch_context)
			extra_dwords += 4;

		/* PTA load command */
		if (switch_mmu_context && gpu->sec_mode == ETNA_SEC_KERNEL)
			extra_dwords += 1;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
		/*
		 * Switch MMU context if necessary. Must be done after the
		 * link target has been calculated, as the jump forward in the
		 * kernel ring still uses the last active MMU context before
		 * the switch.
		 */
		if (switch_mmu_context) {
			struct etnaviv_iommu_context *old_context = gpu->mmu_context;

			etnaviv_iommu_context_get(mmu_context);
			gpu->mmu_context = mmu_context;
			etnaviv_iommu_context_put(old_context);
		}
		if (need_flush) {
			/* Add the MMU flush */
			if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
				CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
					       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
					       VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
					       VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
			} else {
				u32 flush = VIVS_MMUv2_CONFIGURATION_MODE_MASK |
					    VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH;

				if (switch_mmu_context &&
				    gpu->sec_mode == ETNA_SEC_KERNEL) {
					unsigned short id =
						etnaviv_iommuv2_get_pta_id(gpu->mmu_context);
					CMD_LOAD_STATE(buffer,
						VIVS_MMUv2_PTA_CONFIG,
						VIVS_MMUv2_PTA_CONFIG_INDEX(id));
				}

				if (gpu->sec_mode == ETNA_SEC_NONE)
					flush |= etnaviv_iommuv2_get_mtlb_addr(gpu->mmu_context);

				CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
					       flush);
				CMD_SEM(buffer, SYNC_RECIPIENT_FE,
					SYNC_RECIPIENT_PE);
				CMD_STALL(buffer, SYNC_RECIPIENT_FE,
					  SYNC_RECIPIENT_PE);
			}

			gpu->flush_seq = new_flush_seq;
		}
		if (switch_context) {
			etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
			gpu->exec_state = exec_state;
		}

		/* And the link to the submitted buffer */
		link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					&gpu->mmu_context->cmdbuf_mapping);
		CMD_LINK(buffer, link_dwords, link_target);

		/* Update the link target to point to above instructions */
		link_target = target;
		link_dwords = extra_dwords;
	}
	/*
	 * Append a LINK to the submitted command buffer to return to
	 * the ring buffer.  return_target is the ring target address.
	 * We need at most 7 dwords in the return target: 2 cache flush +
	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
	 */
	return_dwords = 7;
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/*
	 * Append a cache flush, stall, event, wait and link pointing back to
	 * the wait command to the ring buffer.
	 */
	if (gpu->exec_state == ETNA_PIPE_2D) {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_PE2D);
	} else {
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
			       VIVS_GL_FLUSH_CACHE_DEPTH |
			       VIVS_GL_FLUSH_CACHE_COLOR);
		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
			       VIVS_TS_FLUSH_CACHE_FLUSH);
	}
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);
	if (drm_debug_enabled(DRM_UT_DRIVER))
		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
			return_target,
			etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping),
			cmdbuf->vaddr);

	if (drm_debug_enabled(DRM_UT_DRIVER)) {
		print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			       cmdbuf->vaddr, cmdbuf->size, 0);

		pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
		pr_info("addr: 0x%08x\n", link_target);
		pr_info("back: 0x%08x\n", return_target);
		pr_info("event: %d\n", event);
	}

	/*
	 * Kick off the submitted command by replacing the previous
	 * WAIT with a link to the address in the ring buffer.
	 */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);

	if (drm_debug_enabled(DRM_UT_DRIVER))
		etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
}