drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c (Linux 4.2.1)
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>
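/**
 * vmw_fifo_have_3d - Check whether the device supports 3D commands.
 *
 * @dev_priv: The device private structure.
 *
 * On guest-backed object devices this queries SVGA3D_DEVCAP_3D through
 * the device cap registers; on older devices it checks the extended
 * fifo for a usable 3D hardware version.
 */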
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Non-Screen Object path does not support surfaces */
	if (!dev_priv->sou_priv)
		return false;

	return true;
}
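/**
 * vmw_fifo_have_pitchlock - Check whether the fifo supports pitchlock.
 *
 * @dev_priv: The device private structure.
 */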
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}
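/**
 * vmw_fifo_init - Initialize the fifo and enable the SVGA device.
 *
 * @dev_priv: The device private structure.
 * @fifo: The fifo state to set up.
 *
 * Saves the register state to be restored on release, programs the
 * fifo MIN/MAX/NEXT_CMD/STOP registers, and finally sends a fence
 * through the freshly configured command path.
 */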
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}
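/**
 * vmw_fifo_ping_host - Ask the host to start processing fifo commands.
 *
 * @dev_priv: The device private structure.
 * @reason: The sync reason written to SVGA_REG_SYNC.
 */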
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	static DEFINE_SPINLOCK(ping_lock);
	unsigned long irq_flags;

	/*
	 * The ping_lock is needed because we don't have an atomic
	 * test-and-set of the SVGA_FIFO_BUSY register.
	 */
	spin_lock_irqsave(&ping_lock, irq_flags);
	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
	spin_unlock_irqrestore(&ping_lock, irq_flags);
}
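/**
 * vmw_fifo_release - Drain the fifo and tear down the fifo state.
 *
 * @dev_priv: The device private structure.
 * @fifo: The fifo state to tear down.
 *
 * Busy-waits for the device to go idle, then restores the register
 * state saved at init time and frees the bounce buffers.
 */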
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}
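/**
 * vmw_fifo_is_full - Check whether the fifo lacks room for @bytes.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes needed.
 *
 * Free space is (max - next_cmd) + (stop - min); returns true when
 * that is not strictly greater than @bytes.
 */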
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}
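/**
 * vmw_fifo_wait_noirq - Wait for fifo space by polling.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether to wait interruptibly.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK.
 */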
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
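/**
 * vmw_fifo_wait - Wait for fifo space to become available.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to wait for.
 * @interruptible: Whether to wait interruptibly.
 * @timeout: Timeout in jiffies.
 *
 * Enables the fifo progress interrupt if the device supports it,
 * otherwise falls back to polling. Returns -EBUSY on timeout.
 */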
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	spin_lock(&dev_priv->waiter_lock);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);

	return ret;
}
/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it times out waiting for fifo space, or if @bytes is larger than
 * the available fifo space.
 *
 * Returns:
 *   Pointer to the fifo, or null on error (possible hardware hang).
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}
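/**
 * vmw_fifo_res_copy - Copy a bounce buffer into the fifo using the
 * SVGA_FIFO_RESERVED register, wrapping at the fifo boundary.
 *
 * @fifo_state: The fifo state.
 * @fifo_mem: Mapped fifo memory.
 * @next_cmd: Current value of SVGA_FIFO_NEXT_CMD.
 * @max: Fifo end offset.
 * @min: Fifo start offset.
 * @bytes: Number of bytes to copy.
 */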
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}
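/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the fifo one 32-bit
 * word at a time, updating SVGA_FIFO_NEXT_CMD after each word.
 *
 * @fifo_state: The fifo state.
 * @fifo_mem: Mapped fifo memory.
 * @next_cmd: Current value of SVGA_FIFO_NEXT_CMD.
 * @max: Fifo end offset.
 * @min: Fifo start offset.
 * @bytes: Number of bytes to copy.
 *
 * Used when the fifo lacks SVGA_FIFO_CAP_RESERVE.
 */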
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
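/**
 * vmw_fifo_commit - Commit @bytes of previously reserved fifo space.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to commit. Must not exceed the reserved size.
 *
 * Copies out any bounce buffer, advances SVGA_FIFO_NEXT_CMD and pings
 * the host to process the new commands.
 */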
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}
	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}
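/**
 * vmw_fifo_send_fence - Emit a fence command to the fifo.
 *
 * @dev_priv: The device private structure.
 * @seqno: Returns the sequence number assigned to the fence.
 *
 * If the fifo lacks SVGA_FIFO_CAP_FENCE, no command is emitted and
 * the waiting code in vmwgfx_irq.c emulates the fence instead.
 */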
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*seqno, &cmd_fence->fence);
	vmw_fifo_commit(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}
/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * the appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A query result structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object, and that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}