/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

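/*
 * Virtqueue interrupt callbacks.  These run in atomic context, so they
 * only kick the matching dequeue worker and return.
 */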
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

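/*
 * Allocate a vbuffer from the slab cache.  The command and any small
 * response live inline, directly behind the vbuffer header; responses
 * larger than MAX_INLINE_RESP_SIZE must supply their own resp_buf.
 */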
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

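/* Pull completed buffers off the virtqueue and collect them for freeing. */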
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

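/*
 * Dequeue worker for the control queue: reclaims completed vbuffers,
 * logs error responses, tracks the highest completed fence id and
 * signals the matching fences, then frees the buffers.
 */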
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

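/*
 * Add a vbuffer to the control virtqueue.  Called with ctrlq.qlock held;
 * drops and re-acquires the lock while waiting for ring space, hence the
 * __releases/__acquires annotations.  Returns true if the caller should
 * notify (kick) the host.
 */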
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct scatterlist *vout)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vresp;
	int outcnt = 0, incnt = 0;
	bool notify = false;
	int ret;

	if (!vgdev->vqs_ready)
		return notify;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vout) {
		sgs[outcnt + incnt] = vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}
	return notify;
}

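/*
 * Queue a command with an optional fence.  The fence is emitted under
 * ctrlq.qlock, right before the buffer is added to the ring, so fence
 * ids reach the host in submission order.
 */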
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_ctrl_hdr *hdr,
						struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *vout = NULL, sg;
	struct sg_table *sgt = NULL;
	bool notify;
	int outcnt = 0;

	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &outcnt);
			if (!sgt)
				return;
			vout = sgt->sgl;
		} else {
			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
			vout = &sg;
			outcnt = 1;
		}
	}

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 2 + outcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (hdr && fence) {
		virtio_gpu_fence_emit(vgdev, hdr, fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
	spin_unlock(&vgdev->ctrlq.qlock);
	if (notify) {
		if (vgdev->disable_notify)
			vgdev->pending_notify = true;
		else
			virtqueue_notify(vgdev->ctrlq.vq);
	}

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = true;
}

void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
{
	vgdev->disable_notify = false;

	if (!vgdev->pending_notify)
		return;
	vgdev->pending_notify = false;
	virtqueue_notify(vgdev->ctrlq.vq);
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}

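/* Queue a cursor command; cursor vbuffers carry no response buffer. */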
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
					   (struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

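/*
 * Response callbacks.  These run from the ctrlq dequeue worker once the
 * host has filled in resp_buf for the matching command.
 */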
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

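/* EDID block fetch helper, used as the get_edid_block hook for drm_do_get_edid(). */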
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

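/*
 * Attach backing pages to a resource: pin the shmem pages, build the
 * mem-entry array (DMA addresses when the DMA API is in use, physical
 * addresses otherwise) and hand it to the host.
 */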
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

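/*
 * Detach backing pages.  When the pages were DMA-mapped, wait for the
 * host to finish with them before tearing down the IOMMU mappings.
 */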
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (WARN_ON_ONCE(!obj->pages))
		return;

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}

	sg_free_table(obj->pages);
	obj->pages = NULL;

	drm_gem_shmem_unpin(&obj->base.base);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}