drivers/gpu/drm/etnaviv/etnaviv_buffer.c

/*
 * Copyright (C) 2014 Etnaviv Project
 * Author: Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

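/*
 * The *.xml.h headers below carry the generated Vivante hardware register
 * and command-stream definitions (VIV_FE_*, VIVS_*) used throughout this
 * file.
 */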
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"

/*
 * Command Buffer helper:
 */

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
        u32 *vaddr = (u32 *)buffer->vaddr;

        BUG_ON(buffer->user_size >= buffer->size);

        vaddr[buffer->user_size / 4] = data;
        buffer->user_size += 4;
}

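/*
 * Front-end commands are emitted as one or two 32-bit words starting on a
 * 64-bit boundary, hence the ALIGN(user_size, 8) before each command below.
 * CMD_LOAD_STATE writes a single register through the command stream: a
 * LOAD_STATE header (count 1, offset derived from the register address)
 * followed by the value to be written.
 */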
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
        u32 reg, u32 value)
{
        u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

        buffer->user_size = ALIGN(buffer->user_size, 8);

        /* write a register via cmd stream */
        OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
                    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
                    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
        OUT(buffer, value);
}

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

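/*
 * WAIT stalls the front end for a fixed delay (the literal 200 below is
 * that delay value) before it continues.  A WAIT followed by a LINK back
 * to the WAIT forms the ring buffer's idle "waitlink" loop.
 */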
static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
        u16 prefetch, u32 address)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
                    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
        OUT(buffer, address);
}

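/*
 * CMD_SEM and CMD_STALL are used as a pair: the semaphore token names a
 * source and destination engine (here FE and PE), and the STALL command
 * then holds the front end until the pixel engine has signalled the
 * semaphore, i.e. until previously queued work has drained.
 */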
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
        u32 from, u32 to)
{
        buffer->user_size = ALIGN(buffer->user_size, 8);

        OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
        OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
        CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
                       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
                       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}

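/*
 * Emit the command sequence for switching the active pipe: flush the caches
 * of the pipe we are leaving, semaphore/stall FE against PE so the flush has
 * completed, then issue the PIPE_SELECT itself.
 */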
static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, u8 pipe)
{
        u32 flush = 0;

        lockdep_assert_held(&gpu->lock);

        /*
         * This assumes that if we're switching to 2D, we're switching
         * away from 3D, and vice versa.  Hence, if we're switching to
         * the 2D core, we need to flush the 3D depth and color caches,
         * otherwise we need to flush the 2D pixel engine cache.
         */
        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

        CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

        CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}

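/* Debug helper: dump 'len' words of a command buffer starting at 'off'. */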
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
        u32 size = buf->size;
        u32 *ptr = buf->vaddr + off;

        dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
                        ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

        print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                        ptr, len * 4, 0);
}

/*
 * Safely replace the WAIT of a waitlink with a new command and argument.
 * The GPU may be executing this WAIT while we're modifying it, so we have
 * to write it in a specific order to avoid the GPU branching to somewhere
 * else.  'wl_offset' is the offset to the first byte of the WAIT command.
 * The argument word is written (and made visible) before the command word,
 * since the FE keeps re-executing the old WAIT until the opcode changes.
 */
static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
        unsigned int wl_offset, u32 cmd, u32 arg)
{
        u32 *lw = buffer->vaddr + wl_offset;

        lw[1] = arg;
        mb();
        lw[0] = cmd;
        mb();
}

/*
 * Ensure that there is space in the command buffer to contiguously write
 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
 */
static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
        struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
        if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
                buffer->user_size = 0;

        return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size;
}

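/*
 * Set up the ring buffer's initial WAIT/LINK loop.  Returns the prefetch
 * count (in 64-bit words) that the front end should be started with.
 */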
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        /* initialize buffer */
        buffer->user_size = 0;

        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                 buffer->user_size - 4);

        return buffer->user_size / 8;
}

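/*
 * Build a small buffer that selects each available pipe and programs the
 * MMUv2 master TLB and safe address on it, finishing with an END.
 * Returns the prefetch count in 64-bit words.
 */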
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;

        lockdep_assert_held(&gpu->lock);

        buffer->user_size = 0;

        if (gpu->identity.features & chipFeatures_PIPE_3D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        if (gpu->identity.features & chipFeatures_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
                               VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_2D));
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                        mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
                CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        }

        CMD_END(buffer);

        buffer->user_size = ALIGN(buffer->user_size, 8);

        return buffer->user_size / 8;
}

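/*
 * Stop the ring: append the cache flushes required by the current pipe plus
 * an END, and patch the active WAIT/LINK to branch there (or directly into
 * an END if no flush is needed), so the front end stops fetching.
 */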
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 link_target, flush = 0;

        lockdep_assert_held(&gpu->lock);

        if (gpu->exec_state == ETNA_PIPE_2D)
                flush = VIVS_GL_FLUSH_CACHE_PE2D;
        else if (gpu->exec_state == ETNA_PIPE_3D)
                flush = VIVS_GL_FLUSH_CACHE_DEPTH |
                        VIVS_GL_FLUSH_CACHE_COLOR |
                        VIVS_GL_FLUSH_CACHE_TEXTURE |
                        VIVS_GL_FLUSH_CACHE_TEXTUREVS |
                        VIVS_GL_FLUSH_CACHE_SHADER_L2;

        if (flush) {
                unsigned int dwords = 7;

                link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

                CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
                if (gpu->exec_state == ETNA_PIPE_3D)
                        CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                                       VIVS_TS_FLUSH_CACHE_FLUSH);
                CMD_END(buffer);

                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_LINK_HEADER_OP_LINK |
                                            VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                            link_target);
        } else {
                /* Replace the last link-wait with an "END" command */
                etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                            VIV_FE_END_HEADER_OP_END, 0);
        }
}

/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 dwords, target;

        lockdep_assert_held(&gpu->lock);

        /*
         * We need at most 4 dwords in the return target:
         * 1 event + 1 end + 1 wait + 1 link.
         */
        dwords = 4;
        target = etnaviv_buffer_reserve(gpu, buffer, dwords);

        /* Signal sync point event */
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);

        /* Stop the FE to 'pause' the GPU */
        CMD_END(buffer);

        /* Append waitlink */
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                            buffer->user_size - 4);

        /*
         * Kick off the 'sync point' command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(dwords),
                                    target);
}

/* Append a command buffer to the ring buffer. */
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
        unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
        struct etnaviv_cmdbuf *buffer = &gpu->buffer;
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
        bool switch_context = gpu->exec_state != exec_state;

        lockdep_assert_held(&gpu->lock);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        link_target = etnaviv_cmdbuf_get_va(cmdbuf);
        link_dwords = cmdbuf->size / 8;

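        /*
         * Rough sketch of how the pieces are chained together by the code
         * below (addresses are GPU virtual addresses):
         *
         *   ring:   [MMU flush / pipe switch, if needed] [LINK -> cmdbuf]
         *   cmdbuf: [user commands] [LINK -> return target in ring]
         *   ring:   [cache flush] [SEM/STALL] [EVENT] [WAIT] [LINK -> WAIT]
         *
         * Finally the previous WAIT is replaced with a LINK to the start of
         * this chain, which is what actually kicks off execution.
         */
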
        /*
         * If we need maintenance prior to submitting this buffer, we will
         * need to append a mmu flush load state, followed by a new
         * link to this buffer - a total of four additional words.
         */
        if (gpu->mmu->need_flush || switch_context) {
                u32 target, extra_dwords;

                /* link command */
                extra_dwords = 1;

                /* flush command */
                if (gpu->mmu->need_flush) {
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
                                extra_dwords += 3;
                }

                /* pipe switch commands */
                if (switch_context)
                        extra_dwords += 4;

                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

                if (gpu->mmu->need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
                                               VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
                                               VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
                                               VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
                        } else {
                                CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
                                               VIVS_MMUv2_CONFIGURATION_MODE_MASK |
                                               VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
                                               VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH);
                                CMD_SEM(buffer, SYNC_RECIPIENT_FE,
                                        SYNC_RECIPIENT_PE);
                                CMD_STALL(buffer, SYNC_RECIPIENT_FE,
                                          SYNC_RECIPIENT_PE);
                        }

                        gpu->mmu->need_flush = false;
                }

                if (switch_context) {
                        etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
                        gpu->exec_state = exec_state;
                }

                /* And the link to the submitted buffer */
                CMD_LINK(buffer, link_dwords, link_target);

                /* Update the link target to point to above instructions */
                link_target = target;
                link_dwords = extra_dwords;
        }

        /*
         * Append a LINK to the submitted command buffer to return to
         * the ring buffer.  return_target is the ring target address.
         * We need at most 7 dwords in the return target: 2 cache flush +
         * 2 semaphore stall + 1 event + 1 wait + 1 link.
         */
        return_dwords = 7;
        return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
        CMD_LINK(cmdbuf, return_dwords, return_target);

        /*
         * Append a cache flush, stall, event, wait and link pointing back to
         * the wait command to the ring buffer.
         */
        if (gpu->exec_state == ETNA_PIPE_2D) {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_PE2D);
        } else {
                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                               VIVS_GL_FLUSH_CACHE_DEPTH |
                               VIVS_GL_FLUSH_CACHE_COLOR);
                CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
                               VIVS_TS_FLUSH_CACHE_FLUSH);
        }
        CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
        CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
                       VIVS_GL_EVENT_FROM_PE);
        CMD_WAIT(buffer);
        CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) +
                            buffer->user_size - 4);

        if (drm_debug & DRM_UT_DRIVER)
                pr_info("stream link to 0x%08x @ 0x%08x %p\n",
                        return_target, etnaviv_cmdbuf_get_va(cmdbuf),
                        cmdbuf->vaddr);

        if (drm_debug & DRM_UT_DRIVER) {
                print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
                               cmdbuf->vaddr, cmdbuf->size, 0);

                pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
                pr_info("addr: 0x%08x\n", link_target);
                pr_info("back: 0x%08x\n", return_target);
                pr_info("event: %d\n", event);
        }

        /*
         * Kick off the submitted command by replacing the previous
         * WAIT with a link to the address in the ring buffer.
         */
        etnaviv_buffer_replace_wait(buffer, waitlink_offset,
                                    VIV_FE_LINK_HEADER_OP_LINK |
                                    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
                                    link_target);

        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);

        gpu->lastctx = cmdbuf->ctx;
}